From 6fe4cc435c42f408d7c64654c653869cd65444a4 Mon Sep 17 00:00:00 2001 From: Lionel RIGOUX Date: Thu, 30 Aug 2018 10:57:40 +0200 Subject: [PATCH] Refactor - ! Deprecation of 'options.binomial' flag. Use 'options.sources.type' instead (same flag values). - Major restructuring of the toolbox: - rationalisation of the folders - systematic renaming with the 'VBA_' prefix (with backward compatibility) - thematic reorganisation of the demos - better encapsulation of third party code - Performance tuning, in particular of the display functions. Inversion should be much faster now... - Refactoring of numerous functions, with improved documentation. In particular: - all sigmoid functions now captured by VBA_sigmoid - unified random number generation with VBA_random - Smarter defaults, eg. when guessing model or source dimensions. - Cleaner display, normalised across Matlab version - Initial set of unit tests to ensure code stability - Lots of cleaning and documenting - Minor bug fixes --- README.md | 70 +++- VBA_BMA.m | 186 ++++++---- VBA_EKF.m | 18 +- VBA_Iphi_extended.m | 2 +- VBA_JensenShannon.m | 82 ----- VBA_LMEH0.m | 2 +- ...eDensity.m => VBA_MCMC_predictiveDensity.m | 32 +- ...ty_fb.m => VBA_MCMC_predictiveDensity_fb.m | 32 +- VBA_MFX.m | 10 +- VBA_NLStateSpaceModel.m | 34 +- VBA_ReDisplay.m | 20 +- VBA_designEfficiency.m | 12 +- VBA_disp.m | 19 - VBA_getISqrtMat.m | 34 -- VBA_getKernels.m | 82 ----- VBA_getLaplace.m | 6 +- VBA_get_dL.m | 40 --- VBA_groupBMC.m | 19 +- VBA_groupBMC_btwConds.m | 10 +- VBA_hyperparameters.m | 15 +- VBA_optimPriors.m | 4 +- VBA_sample.m | 92 ----- VBA_setup.m | 1 - simulateNLSS.m => VBA_simulate.m | 102 +++--- VBA_unit_tests.m | 42 --- VBA_version.m | 11 +- checkGX_binomial.m | 16 - VBA_FreeEnergy.m => core/VBA_FreeEnergy.m | 6 +- VBA_GN.m => core/VBA_GN.m | 83 +++-- VBA_Hpost.m => core/VBA_Hpost.m | 0 VBA_IX0.m => core/VBA_IX0.m | 6 +- VBA_IX_lagged.m => core/VBA_IX_lagged.m | 8 +- .../VBA_IX_lagged_binomial.m | 8 +- VBA_Initialize.m => core/VBA_Initialize.m | 2 +- VBA_Iphi.m => core/VBA_Iphi.m | 10 +- VBA_Iphi_UNL.m => core/VBA_Iphi_UNL.m | 16 +- .../VBA_Iphi_binomial.m | 10 +- core/VBA_Iphi_extended.m | 166 +++++++++ VBA_Iphi_split.m => core/VBA_Iphi_split.m | 12 +- VBA_Itheta.m => core/VBA_Itheta.m | 8 +- VBA_VarParam.m => core/VBA_VarParam.m | 0 VBA_check.m => core/VBA_check.m | 50 ++- VBA_check4DCM.m => core/VBA_check4DCM.m | 0 VBA_check_errors.m => core/VBA_check_errors.m | 4 +- core/VBA_disp.m | 36 ++ VBA_evalFun.m => core/VBA_evalFun.m | 23 +- VBA_fillInPriors.m => core/VBA_fillInPriors.m | 0 VBA_getDefaults.m => core/VBA_getDefaults.m | 2 +- VBA_getU.m => core/VBA_getU.m | 2 +- core/VBA_get_dL.m | 66 ++++ VBA_microTime.m => core/VBA_microTime.m | 4 +- .../VBA_multisession_expand.m | 5 +- .../VBA_multisession_factor.m | 0 VBA_odeLim.m => core/VBA_odeLim.m | 0 VBA_odeLim2NLSS.m => core/VBA_odeLim2NLSS.m | 0 .../VBA_onlineWrapper.m | 25 +- VBA_priors.m => core/VBA_priors.m | 2 +- VBA_wrapup.m => core/VBA_wrapup.m | 2 +- .../diagnostics/VBA_getDiagnostics.m | 8 +- VBA_fit.m => core/diagnostics/VBA_getFit.m | 54 +-- .../diagnostics/VBA_getSuffStat.m | 6 +- .../diagnostics/VBA_getVolterraKernels.m | 34 +- .../display}/MoveAxisToOrigin.m | 0 .../display}/Plot3AxisAtOrigin.m | 0 .../display}/PlotAxisAtOrigin.m | 0 VBA_Bin2Cont.m => core/display/VBA_Bin2Cont.m | 0 .../display/VBA_classification_display.m | 2 +- .../display/VBA_displayGrads.m | 4 +- .../display/VBA_displayGroupBMC.m | 2 +- .../display/VBA_displayGroupBMCbtw.m | 12 +- .../display/VBA_displayMFX.m 
| 2 +- core/display/VBA_figure.m | 17 + .../display/VBA_initDisplay.m | 10 +- VBA_pause.m => core/display/VBA_pause.m | 16 +- VBA_summary.m => core/display/VBA_summary.m | 13 +- .../display/VBA_summaryMFX.m | 32 +- {stats&plots => core/display}/VBA_title.m | 0 .../display/VBA_updateDisplay.m | 210 ++++++++--- .../display}/displayResults.m | 36 +- .../display}/displaySimulations.m | 4 +- {stats&plots => core/display}/getColors.m | 0 {stats&plots => core/display}/getPanel.m | 0 {stats&plots => core/display}/plotDensity.m | 17 +- {stats&plots => core/display}/plotElipse.m | 0 {stats&plots => core/display}/plotGraph3D.m | 2 +- .../display}/plotUncertainTimeSeries.m | 71 +++- {stats&plots => core/display}/plotVolterra.m | 0 {stats&plots => core/display}/rotateXLabels.m | 0 .../sugar/getStateParamInput.m | 0 .../sugar}/priorPrettifyer.m | 0 {subfunctions => core/sugar}/priorUglyfier.m | 0 setInput.m => core/sugar/setInput.m | 0 setPriors.m => core/sugar/setPriors.m | 0 .../VBA_FreeEnergy_UNL.m | 2 +- .../unormalizedLikelihood/VBA_UNLtemp.m | 2 +- .../unormalizedLikelihood/VBA_evalAL.m | 10 +- .../unormalizedLikelihood/VBA_evalAL2.m | 12 +- demos/0_basics/demo_dynamicalSystem.m | 220 ++++++++++++ demos/0_basics/demo_excludeData.m | 83 +++++ demos/0_basics/demo_modelComparison.m | 107 ++++++ demos/0_basics/demo_multisession.m | 103 ++++++ demos/0_basics/demo_sources.m | 164 +++++++++ demos/0_basics/demo_staticModel.m | 79 ++++ .../1_advanced}/demo_MCsampling.m | 7 +- {subfunctions => demos/1_advanced}/demo_MFX.m | 70 +++- {subfunctions => demos/1_advanced}/demo_RFX.m | 4 +- demos/1_advanced/demo_VolterraKernels.m | 81 +++++ .../1_advanced}/demo_delays.m | 4 +- demos/1_advanced/demo_designOptimization.m | 297 +++++++++++++++ demos/1_advanced/demo_noiseAR1.m | 156 ++++++++ demos/1_advanced/demo_stochasticModel.m | 77 ++++ .../1_advanced}/demo_susceptibility.m | 2 +- .../1_advanced}/demo_trainTest.m | 6 +- .../2_statistics/VBA_LinDecomp.m | 6 +- .../2_statistics}/demo_CI.m | 6 +- demos/{statistics => 2_statistics}/demo_GLM.m | 0 .../2_statistics}/demo_GLM_missingData.m | 2 +- .../2_statistics}/demo_KalmanSmoother.m | 2 +- .../2_statistics}/demo_LinDecomp.m | 0 .../2_statistics}/demo_RFT.m | 2 +- .../2_statistics}/demo_RFT_GLM.m | 2 +- .../2_statistics}/demo_covComp.m | 4 +- .../2_statistics}/demo_generalizability.m | 6 +- .../2_statistics}/demo_groupbtw.m | 32 +- .../demo_logisticRegression.m | 6 +- .../2_statistics}/demo_mediation.m | 0 .../2_statistics}/demo_redundancy.m | 2 +- demos/2_statistics/demo_sparsityPrior.m | 178 +++++++++ .../3_behavioural}/demo_2DChoices.m | 16 +- .../3_behavioural}/demo_AVL_recog.m | 16 +- demos/3_behavioural/demo_BSL.m | 121 +++++++ .../demo_Qlearning.m | 9 +- demos/3_behavioural/demo_QlearningAsymetric.m | 236 ++++++++++++ .../demo_QlearningSimulation.m | 11 +- .../3_behavioural}/demo_ToMgames.m | 0 .../3_behavioural}/demo_VBfree.m | 6 +- .../3_behavioural}/demo_asymRW.m | 23 +- .../3_behavioural}/demo_discounting.m | 4 +- .../3_behavioural}/demo_dynLearningRate.m | 18 +- .../3_behavioural}/demo_influenceLearning.m | 6 +- .../3_behavioural}/demo_recur.m | 4 +- .../3_behavioural}/demo_volatileVB.m | 6 +- .../3_behavioural}/demo_wsls.m | 8 +- {subfunctions => demos/4_neural}/compHRFs.m | 0 .../4_neural}/demo_2DneuralField.m | 6 +- .../4_neural}/demo_CaBBI_FHN.m | 0 .../4_neural}/demo_CaBBI_QGIF.m | 0 {subfunctions => demos/4_neural}/demo_FHN.m | 4 +- {subfunctions => demos/4_neural}/demo_HRF.m | 4 +- .../4_neural}/demo_HRF_distributed.m | 6 +- 
.../4_neural}/demo_HRF_dummy.m | 0 demos/4_neural/demo_HodgkinHuxley.m | 114 ++++++ {subfunctions => demos/4_neural}/demo_PSP.m | 4 +- .../4_neural}/demo_behaviouralDCM.m | 4 +- .../4_neural}/demo_dcm4fmri.m | 6 +- .../4_neural}/demo_dcm4fmri_distributed.m | 8 +- .../4_neural}/demo_dcm_1region.m | 6 +- .../4_neural}/demo_dcm_motorPremotor.m | 2 +- .../4_neural}/demo_dcmonline.m | 23 +- .../4_neural}/demo_fitzhugh.m | 4 +- {subfunctions => demos/4_neural}/demo_lin2D.m | 6 +- .../4_neural}/demo_micro2macro.m | 6 +- .../4_neural}/demo_negfeedback.m | 2 +- .../4_neural}/demo_stability_HRF.m | 6 +- .../4_neural/demo_stability_hrf.mat | Bin .../BMM => demos/5_classification}/demo_BMM.m | 0 .../CRP => demos/5_classification}/demo_DP.m | 0 .../GMM => demos/5_classification}/demo_GMM.m | 0 .../5_classification}/demo_Henon.m | 21 +- .../5_classification}/demo_bin.m | 6 +- .../5_classification}/demo_classification.m | 4 +- .../6_physics}/demo_2Dlin.m | 6 +- {subfunctions => demos/6_physics}/demo_LV2D.m | 12 +- .../6_physics}/demo_Lorenz.m | 13 +- .../6_physics}/demo_Oscillatory.m | 9 +- .../6_physics}/demo_Rossler.m | 20 +- {subfunctions => demos/6_physics}/demo_SHC.m | 6 +- .../6_physics}/demo_VanDerPol.m | 10 +- .../6_physics}/demo_doubleWell.m | 41 ++- .../7_mathematics}/demo_Elogsig.m | 12 +- .../7_mathematics}/demo_gaussian.m | 0 .../7_mathematics}/demo_imageRegistration.m | 0 .../7_mathematics}/demo_logNormal.m | 2 +- .../7_mathematics}/demo_prodsig.m | 6 +- {subfunctions => demos/_models}/ObsRecGen.m | 10 +- {subfunctions => demos/_models}/U_dummy.m | 0 .../_models}/evolution0bisND.m | 4 +- .../_models}/f_2DneuralField.m | 0 {subfunctions => demos/_models}/f_2d.m | 0 {subfunctions => demos/_models}/f_2dwu.m | 0 {subfunctions => demos/_models}/f_AR.m | 0 {subfunctions => demos/_models}/f_ARn.m | 6 +- {subfunctions => demos/_models}/f_ARplus.m | 0 {subfunctions => demos/_models}/f_AVL.m | 14 +- {subfunctions => demos/_models}/f_BSL.m | 6 +- {subfunctions => demos/_models}/f_BSLinGame.m | 2 +- {subfunctions => demos/_models}/f_CaBBI_FHN.m | 0 .../_models}/f_CaBBI_QGIF.m | 0 {subfunctions => demos/_models}/f_DCMwHRF.m | 0 .../_models}/f_DCMwHRFext.m | 0 .../_models}/f_FitzHughNagumo.m | 0 .../_models}/f_FitzHughNagumo_calcium.m | 0 {subfunctions => demos/_models}/f_HGFinGame.m | 2 +- {subfunctions => demos/_models}/f_HH.m | 14 +- {subfunctions => demos/_models}/f_HRF.m | 4 +- {subfunctions => demos/_models}/f_HRF2.m | 0 {subfunctions => demos/_models}/f_HRF3.m | 0 {subfunctions => demos/_models}/f_Hampton.m | 10 +- {subfunctions => demos/_models}/f_Henon.m | 0 f_Id.m => demos/_models/f_Id.m | 0 {subfunctions => demos/_models}/f_L1.m | 0 {subfunctions => demos/_models}/f_LV2D.m | 0 {subfunctions => demos/_models}/f_Lorenz.m | 0 .../_models}/f_LotkaVolterra.m | 0 {subfunctions => demos/_models}/f_OpLearn.m | 0 {subfunctions => demos/_models}/f_PSP.m | 0 {subfunctions => demos/_models}/f_Qlearn.m | 2 +- {subfunctions => demos/_models}/f_Qlearn2.m | 0 .../_models}/f_Qlearn_dynLR.m | 0 .../_models}/f_Qlearn_gammaLR.m | 0 {models => demos/_models}/f_Qlearning.m | 2 +- demos/_models/f_QlearningAsym.m | 76 ++++ {subfunctions => demos/_models}/f_RLinGame.m | 2 +- demos/_models/f_Rossler.m | 17 + demos/_models/f_SHC.m | 76 ++++ {subfunctions => demos/_models}/f_VBfree.m | 0 .../_models}/f_VBvolatile0.m | 10 +- demos/_models/f_alpha.m | 8 + {subfunctions => demos/_models}/f_dbw.m | 0 {subfunctions => demos/_models}/f_dcm4fmri.m | 0 {subfunctions => demos/_models}/f_dcm4fmri0.m | 0 
.../_models}/f_dcm_extension.m | 0 {subfunctions => demos/_models}/f_dcm_withU.m | 0 .../_models}/f_doubleWell.m | 0 f_embed.m => demos/_models/f_embed.m | 0 {subfunctions => demos/_models}/f_embed0.m | 0 {subfunctions => demos/_models}/f_embedAR.m | 0 .../_models}/f_fullDCM4fmri.m | 0 {subfunctions => demos/_models}/f_gen.m | 0 {subfunctions => demos/_models}/f_kToM.m | 2 +- {subfunctions => demos/_models}/f_lin1D.m | 0 demos/_models/f_lin2D.m | 21 ++ {subfunctions => demos/_models}/f_metaToM.m | 22 +- .../_models}/f_replicator.m | 0 demos/_models/f_rwl.m | 7 + {subfunctions => demos/_models}/f_rwl2.m | 8 +- {subfunctions => demos/_models}/f_vanDerPol.m | 0 demos/_models/f_vgo.m | 2 + {subfunctions => demos/_models}/f_wsls.m | 0 .../_models}/f_wslsinGame.m | 2 +- .../_models}/g_2AFC_basis.m | 4 +- {subfunctions => demos/_models}/g_AVL.m | 1 - {subfunctions => demos/_models}/g_BSL.m | 4 +- {subfunctions => demos/_models}/g_BSLinGame.m | 2 +- {subfunctions => demos/_models}/g_CaBBI.m | 0 .../_models}/g_DCMwHRFext.m | 0 {subfunctions => demos/_models}/g_DG2.m | 0 .../_models}/g_DoubleGamma.m | 0 {subfunctions => demos/_models}/g_ERP.m | 0 .../_models}/g_ERP_reduced.m | 0 {subfunctions => demos/_models}/g_ExpUtil.m | 10 +- {subfunctions => demos/_models}/g_Fourier.m | 0 {subfunctions => demos/_models}/g_GLM.m | 0 .../_models}/g_GLM4decoding.m | 0 .../_models}/g_GLM_missingData.m | 0 demos/_models/g_GLMsparse.m | 4 + demos/_models/g_GLMsparseAdapt.m | 10 + .../_models}/g_GammaDensity.m | 0 {subfunctions => demos/_models}/g_Gaussian.m | 0 {subfunctions => demos/_models}/g_HGFinGame.m | 4 +- {subfunctions => demos/_models}/g_HRF3.m | 2 +- .../_models}/g_HRF_distributed.m | 0 {subfunctions => demos/_models}/g_Hampton.m | 4 +- demos/_models/g_Id.m | 28 ++ {subfunctions => demos/_models}/g_Id_phi.m | 0 {subfunctions => demos/_models}/g_LinDecomp.m | 2 +- {subfunctions => demos/_models}/g_NI.m | 0 demos/_models/g_QLearning.m | 74 ++++ {subfunctions => demos/_models}/g_RFX.m | 0 {subfunctions => demos/_models}/g_Udummy.m | 0 .../_models}/g_VBvolatile0.m | 6 +- {subfunctions => demos/_models}/g_classif.m | 0 {subfunctions => demos/_models}/g_classif0.m | 2 +- g_conv0.m => demos/_models/g_conv0.m | 10 +- g_convSig.m => demos/_models/g_convSig.m | 2 +- .../_models}/g_convSig_approx.m | 3 +- .../_models/g_conv_approx.m | 33 ++ .../_models}/g_demo_extended.m | 0 .../_models}/g_demo_susceptibility.m | 0 .../_models}/g_discounting.m | 4 +- {subfunctions => demos/_models}/g_dummy.m | 0 g_embed.m => demos/_models/g_embed.m | 0 {subfunctions => demos/_models}/g_embedAR.m | 0 {subfunctions => demos/_models}/g_exp.m | 0 {subfunctions => demos/_models}/g_exp2d.m | 0 .../_models}/g_fullDCM4fmri.m | 0 {subfunctions => demos/_models}/g_goNogo.m | 0 {subfunctions => demos/_models}/g_ip.m | 0 {subfunctions => demos/_models}/g_kToM.m | 0 {subfunctions => demos/_models}/g_logistic.m | 12 +- {subfunctions => demos/_models}/g_matmap.m | 0 {subfunctions => demos/_models}/g_metaToM.m | 2 +- {subfunctions => demos/_models}/g_mixU.m | 0 {subfunctions => demos/_models}/g_nl0.m | 0 {subfunctions => demos/_models}/g_odds.m | 0 {subfunctions => demos/_models}/g_odds2.m | 0 {subfunctions => demos/_models}/g_rbf.m | 0 {subfunctions => demos/_models}/g_rigid2D.m | 0 {subfunctions => demos/_models}/g_sig.m | 0 {subfunctions => demos/_models}/g_sig_u.m | 2 +- demos/_models/g_sigm.m | 14 + demos/_models/g_sigm_binomial.m | 29 ++ {subfunctions => demos/_models}/g_sigmoid.m | 22 +- {subfunctions => demos/_models}/g_softmax.m | 4 +- 
.../_models}/g_softmax4decoding.m | 0 {subfunctions => demos/_models}/g_ttest.m | 4 +- {subfunctions => demos/_models}/g_u2c.m | 7 +- {subfunctions => demos/_models}/g_u2p.m | 7 +- demos/_models/g_vgo.m | 2 + .../_models}/g_wrap_perseveration.m | 6 +- {subfunctions => demos/_models}/h_Id.m | 0 {subfunctions => demos/_models}/h_goNogo.m | 0 .../_models}/h_randOutcome.m | 0 {subfunctions => demos/_models}/h_truefalse.m | 0 {subfunctions => demos/_models}/h_whichItem.m | 0 .../_models}/v_discounting.m | 2 +- factorial_struct.m | 63 ---- getF.m | 18 - getHyperpriors.m | 48 --- getKernels.m | 85 ----- isbinary.m | 29 -- isweird.m | 31 -- legacy/GaussNewton.m | 13 + legacy/README.md | 5 + legacy/VBA_getISqrtMat.m | 11 + VBA_groupBMCbtw.m => legacy/VBA_groupBMCbtw.m | 0 legacy/VBA_sample.m | 36 ++ legacy/checkGX_binomial.m | 8 + legacy/cov2corr.m | 8 + legacy/empiricalHist.m | 8 + legacy/getHyperpriors.m | 8 + legacy/getSubplots.m | 8 + legacy/get_MCMC_predictiveDensity.m | 24 ++ legacy/get_MCMC_predictiveDensity_fb.m | 25 ++ {subfunctions => legacy}/invSparsify.m | 0 legacy/invsigmoid.m | 8 + legacy/isbinary.m | 8 + legacy/isweird.m | 8 + legacy/iswithin.m | 8 + legacy/numericDiff.m | 8 + legacy/sampleFromArbitraryP.m | 17 + legacy/sgm.m | 11 + legacy/sig.m | 8 + legacy/sigm.m | 41 +++ legacy/sigmoid.m | 8 + legacy/simulateNLSS.m | 12 + legacy/simulateNLSS_fb.m | 8 + legacy/sparseTransform.m | 8 + legacy/sparsify.m | 8 + {subfunctions => legacy/trashbin}/ERP_dcm.m | 0 .../trashbin}/compare_struct.m | 0 .../trashbin}/create2dbf.m | 0 .../trashbin}/extractFIR.m | 2 +- {subfunctions => legacy/trashbin}/getFamily.m | 0 {subfunctions => legacy/trashbin}/get_ARcov.m | 0 .../trashbin}/gridL_binomial.m | 3 +- {stats&plots => legacy/trashbin}/hatch.m | 0 .../trashbin}/isHrfStable.m | 0 {subfunctions => legacy/trashbin}/logExp.m | 0 {stats&plots => legacy/trashbin}/smooth2.m | 0 legacy/vec.m | 8 + .../DCM}/BOLD_parameters.m | 0 .../DCM}/addConfounds2dcm.m | 0 {subfunctions => modules/DCM}/dcm2vba.m | 0 .../DCM}/defaultHRFparams.m | 0 {subfunctions => modules/DCM}/extend_dcm.m | 0 .../DCM}/getOptions4dcm.m | 2 + {subfunctions => modules/DCM}/getPriors.m | 37 +- {subfunctions => modules/DCM}/get_HRFparams.m | 0 {subfunctions => modules/DCM}/get_U_basis.m | 2 +- {subfunctions => modules/DCM}/prepare_dcm.m | 0 .../DCM}/prepare_fullDCM.m | 0 {subfunctions => modules/DCM}/u_Fourier.m | 0 .../DCM}/u_FourierComplete.m | 0 .../DCM}/u_GaussianBumps.m | 0 {subfunctions => modules/DCM}/u_RBF.m | 0 {subfunctions => modules/DCM}/vba2dcm.m | 2 +- .../GLM}/Contrast_MEbins.m | 0 {stats&plots => modules/GLM}/GLM_contrast.m | 20 +- {stats&plots => modules/GLM}/GLM_covComp.m | 0 {stats&plots => modules/GLM}/GLM_tolerance.m | 0 {stats&plots => modules/GLM}/PRESS_GLM.m | 0 {stats&plots => modules/GLM}/SEM_analysis0.m | 4 +- {stats&plots => modules/GLM}/lev_GLM.m | 0 .../GLM}/mediationAnalysis0.m | 6 +- .../GLM}/mediation_contrast.m | 0 {subfunctions => modules/OTO}/VarVolatility.m | 0 {subfunctions => modules/OTO}/expBinom.m | 2 +- .../OTO}/getPosteriorfromOTO.m | 4 +- .../OTO}/unwrapVBvolatileOTO.m | 18 +- .../classical_statistics}/bayesian_ttest.m | 8 +- .../classical_statistics}/doROC.m | 4 +- .../classification}/BMM/MixtureOfBinomials.m | 0 .../classification}/BMM/generateBMM.m | 2 +- .../classification}/CRP/VB_CRP.m | 0 .../classification}/CRP/simulate_CRP.m | 4 +- .../classification}/GMM/PCA_MoG.m | 0 .../classification}/GMM/VBA_MoG.m | 28 +- .../classification}/GMM/VBA_projectMoG.m | 2 +- 
.../classification}/GMM/VBEM_GM.m | 2 +- .../classification}/GMM/dist.m | 0 .../classification}/GMM/generateGMM.m | 0 .../classification}/GMM/plotResults.m | 0 .../classification/VBA_classification.m | 9 +- .../random_field_theory}/RFT_Euler.m | 0 .../random_field_theory}/RFT_GLM_contrast.m | 2 +- .../random_field_theory}/RFT_Gtf.m | 0 .../random_field_theory}/RFT_Pval.m | 0 .../random_field_theory}/RFT_ReDisplay.m | 2 +- .../random_field_theory}/RFT_clusters.m | 6 +- .../random_field_theory}/RFT_expectedTopo.m | 2 +- .../random_field_theory}/RFT_localmax.m | 2 +- .../random_field_theory}/RFT_main.m | 4 +- .../random_field_theory}/RFT_rescaling.m | 0 .../random_field_theory}/RFT_smoothness.m | 0 .../theory_of_mind}/RecToMfunction.m | 22 +- .../theory_of_mind}/defIndlev.m | 0 .../theory_of_mind}/extractKernels.m | 0 .../theory_of_mind}/fplayer.m | 0 .../theory_of_mind}/get_VolterraInGames.m | 4 +- .../theory_of_mind}/prepare_agent.m | 6 +- .../theory_of_mind}/prepare_kToM.m | 0 .../theory_of_mind}/prepare_metaToM.m | 2 +- .../theory_of_mind}/runGame_2players.m | 27 +- .../theory_of_mind}/script_test_rec.m | 18 +- .../theory_of_mind}/sizeXrec.m | 0 .../theory_of_mind}/unwrapKTOM.m | 8 +- numericDiff.m | 74 ---- sampleFromArbitraryP.m | 29 -- VBA_UNL0.m => sandbox/VBA_UNL0.m | 4 +- .../demo_bayesian_ttest.m | 26 +- {subfunctions => sandbox}/demo_dummyUNL.m | 0 simulateNLSS_fb.m | 4 - stats&plots/addBestLinearPredictor.m | 36 -- stats&plots/approxOnGrid.m | 21 -- stats&plots/displayUncertainSigmoid.m | 79 ---- stats&plots/empiricalHist.m | 41 --- stats&plots/findCI.m | 36 -- stats&plots/fisher.m | 8 - stats&plots/maxMat.m | 7 - stats&plots/medianfilter0.m | 34 -- stats&plots/mnan.m | 11 - stats&plots/nanzscore.m | 46 --- stats&plots/normalize.m | 9 - stats&plots/pinvComplex.m | 15 - stats&plots/removeOutliers.m | 28 -- stats&plots/snan.m | 10 - stats&plots/sparsify.m | 29 -- stats&plots/testPower.m | 26 -- subfunctions/Av.m | 5 - subfunctions/Contrast_MEbins.m | 12 - subfunctions/GetGitPath.m | 75 ---- subfunctions/SSE.m | 3 - subfunctions/VBA_bernoulli.m | 11 - subfunctions/balanced_accuracy.m | 21 -- subfunctions/checkF.m | 32 -- subfunctions/check_constrasts.m | 14 - subfunctions/dMatdvec.m | 12 - subfunctions/demo_AR1.m | 114 ------ subfunctions/demo_BSL.m | 71 ---- subfunctions/demo_HH.m | 86 ----- subfunctions/demo_HodgkinHuxley.m | 75 ---- subfunctions/demo_binomial.m | 73 ---- subfunctions/demo_binomial_AdaptDesign.m | 113 ------ subfunctions/demo_binomial_adapt.m | 57 --- subfunctions/demo_bmc4glm.m | 60 ---- subfunctions/demo_dynsys.m | 125 ------- subfunctions/demo_getConvKernel.m | 58 --- subfunctions/demo_irregular.m | 45 --- subfunctions/demo_linear.m | 42 --- subfunctions/demo_multisession.m | 103 ------ subfunctions/demo_multisource.m | 63 ---- subfunctions/demo_sparsePriors.m | 180 ---------- subfunctions/demo_sparsify.m | 107 ------ subfunctions/demo_stochasticBinomial.m | 65 ---- subfunctions/dsdv.m | 8 - subfunctions/elvis.m | 10 - subfunctions/f_Rossler.m | 13 - subfunctions/f_SHC.m | 98 ----- subfunctions/f_alpha.m | 4 - subfunctions/f_embed_old.m | 34 -- subfunctions/f_lin2D.m | 25 -- subfunctions/f_rwl.m | 5 - subfunctions/f_try.m | 7 - subfunctions/f_vgo.m | 2 - subfunctions/g_GLMsparse.m | 4 - subfunctions/g_GLMsparse2.m | 6 - subfunctions/g_Id.m | 30 -- subfunctions/g_conv_approx.m | 31 -- subfunctions/g_probit.m | 2 - subfunctions/g_sigm.m | 9 - subfunctions/g_sigm_binomial.m | 18 - subfunctions/g_sqrtSig.m | 22 -- subfunctions/g_vgo.m | 2 - 
subfunctions/get_sigmoidMoments.m | 35 -- subfunctions/invsigmoid.m | 8 - subfunctions/iswithin.m | 29 -- subfunctions/medianfilter0.m | 34 -- subfunctions/multi2num.m | 7 - subfunctions/multinomial_accuracy.m | 40 --- subfunctions/nanzscore.m | 46 --- subfunctions/num2multi.m | 9 - subfunctions/parseargs.m | 97 ----- subfunctions/sgm.m | 11 - subfunctions/show_potential.m | 20 -- subfunctions/sig.m | 4 - subfunctions/sigm.m | 79 ---- subfunctions/sigmoid.m | 5 - subfunctions/softmax.m | 6 - subfunctions/softmaxMat.m | 5 - subfunctions/sparseTransform.m | 15 - subfunctions/spear.m | 124 ------- subfunctions/unwrapKBSL.m | 54 --- subfunctions/util.m | 7 - tests/VBA_test.m | 11 + tests/demos/TestAllDemos.m | 32 ++ tests/utils/test_VBA_r2.m | 44 +++ tests/utils/test_VBA_random.m | 340 ++++++++++++++++++ tests/utils/test_VBA_sigmoid.m | 99 +++++ tests/utils/test_VBA_sqrtm.m | 57 +++ {subfunctions => thrid-party}/getGitInfo.m | 0 {stats&plots => thrid-party}/hist2.m | 0 {subfunctions => thrid-party}/nanfft.m | 0 {stats&plots => thrid-party}/spear.m | 0 thrid-party/spm/VBA_spm_dcm_build.m | 26 +- thrid-party/spm/VBA_spm_dcm_explore.m | 18 +- thrid-party/spm/VBA_spm_dcm_fmri_check.m | 2 +- thrid-party/spm/VBA_spm_gamrnd.c | 12 +- thrid-party/spm/VBA_spm_gamrnd.m | 2 +- thrid-party/spm/VBA_spm_gamrnd.mexa64 | Bin 14752 -> 13058 bytes thrid-party/spm/VBA_spm_gamrnd.mexglx | Bin 8725 -> 0 bytes thrid-party/spm/VBA_spm_gamrnd.mexmaci | Bin 12852 -> 0 bytes thrid-party/spm/VBA_spm_gamrnd.mexmaci64 | Bin 8840 -> 9048 bytes thrid-party/spm/VBA_spm_gamrnd.mexw32 | Bin 7168 -> 8192 bytes thrid-party/spm/VBA_spm_gamrnd.mexw64 | Bin 9728 -> 9728 bytes stats&plots/EVprodX.m => utils/VBA_EVprodX.m | 2 +- VBA_ElogBeta.m => utils/VBA_ElogBeta.m | 10 +- subfunctions/Elogsig.m => utils/VBA_Elogsig.m | 22 +- .../VBA_ExceedanceProb.m | 31 +- .../Fourier2DBF.m => utils/VBA_Fourier2DBF.m | 2 +- stats&plots/FtoR2.m => utils/VBA_FtoR2.m | 23 +- .../GaussNewton.m => utils/VBA_GaussNewton.m | 2 +- utils/VBA_JensenShannon.m | 159 ++++++++ VBA_KL.m => utils/VBA_KL.m | 4 +- VBA_PP0.m => utils/VBA_PP0.m | 6 +- VBA_PPM.m => utils/VBA_PPM.m | 2 +- .../VBA_SavageDickey.m | 6 +- VBA_Shapley.m => utils/VBA_Shapley.m | 7 +- utils/VBA_abs.m | 39 ++ utils/VBA_accuracy.m | 102 ++++++ VBA_bDCM_lesion.m => utils/VBA_bDCM_lesion.m | 4 +- VBA_binomial.m => utils/VBA_binomial.m | 0 VBA_checkGN.m => utils/VBA_checkGN.m | 0 .../VBA_check_struct.m | 0 .../VBA_comparePredictions.m | 4 +- VBA_conv2glm.m => utils/VBA_conv2glm.m | 0 .../cov2corr.m => utils/VBA_cov2corr.m | 22 +- VBA_dcmMatrices.m => utils/VBA_dcmMatrices.m | 0 utils/VBA_dirichlet_moments.m | 24 ++ utils/VBA_empiricalDensity.m | 77 ++++ utils/VBA_factorial_struct.m | 89 +++++ utils/VBA_finiteBinomial.m | 23 ++ utils/VBA_fisher.m | 23 ++ VBA_getNtuples.m => utils/VBA_getNtuples.m | 0 getSubplots.m => utils/VBA_getSubplots.m | 128 +++---- VBA_getVar.m => utils/VBA_getVar.m | 0 utils/VBA_guessHyperpriors.m | 48 +++ utils/VBA_indicator.m | 75 ++++ VBA_inv.m => utils/VBA_inv.m | 0 utils/VBA_invComplex.m | 29 ++ utils/VBA_isBinary.m | 33 ++ utils/VBA_isInRange.m | 40 +++ utils/VBA_isWeird.m | 30 ++ utils/VBA_issymmetric.m | 19 + {stats&plots => utils}/VBA_kstest.m | 0 VBA_logDet.m => utils/VBA_logDet.m | 2 +- utils/VBA_logSumExp.m | 39 ++ utils/VBA_maxMat.m | 44 +++ {subfunctions => utils}/VBA_nanmean.m | 6 +- {subfunctions => utils}/VBA_nanstd.m | 0 {subfunctions => utils}/VBA_nanvar.m | 0 {stats&plots => utils}/VBA_ncfcdf.m | 0 utils/VBA_numericDiff.m | 81 +++++ .../optimCost.m => 
utils/VBA_optimCost.m | 10 +- VBA_orth.m => utils/VBA_orth.m | 0 utils/VBA_power.m | 35 ++ stats&plots/prodX.m => utils/VBA_prodX.m | 12 +- VBA_psi.m => utils/VBA_psi.m | 0 {stats&plots => utils}/VBA_quantile.m | 2 +- utils/VBA_r2.m | 52 +++ utils/VBA_random.m | 287 +++++++++++++++ utils/VBA_removeOutliers.m | 53 +++ utils/VBA_sigmoid.m | 206 +++++++++++ utils/VBA_sign.m | 46 +++ utils/VBA_softmax.m | 58 +++ utils/VBA_sparsifyPrior.m | 65 ++++ utils/VBA_sqrtm.m | 51 +++ .../VBA_susceptibility.m | 2 +- utils/VBA_vec.m | 23 ++ vec.m | 12 - 610 files changed, 7097 insertions(+), 5089 deletions(-) delete mode 100644 VBA_JensenShannon.m rename get_MCMC_predictiveDensity.m => VBA_MCMC_predictiveDensity.m (87%) rename get_MCMC_predictiveDensity_fb.m => VBA_MCMC_predictiveDensity_fb.m (87%) delete mode 100644 VBA_disp.m delete mode 100644 VBA_getISqrtMat.m delete mode 100644 VBA_getKernels.m delete mode 100644 VBA_get_dL.m delete mode 100644 VBA_sample.m rename simulateNLSS.m => VBA_simulate.m (63%) delete mode 100644 VBA_unit_tests.m delete mode 100644 checkGX_binomial.m rename VBA_FreeEnergy.m => core/VBA_FreeEnergy.m (98%) rename VBA_GN.m => core/VBA_GN.m (75%) rename VBA_Hpost.m => core/VBA_Hpost.m (100%) rename VBA_IX0.m => core/VBA_IX0.m (95%) rename VBA_IX_lagged.m => core/VBA_IX_lagged.m (94%) rename VBA_IX_lagged_binomial.m => core/VBA_IX_lagged_binomial.m (96%) rename VBA_Initialize.m => core/VBA_Initialize.m (96%) rename VBA_Iphi.m => core/VBA_Iphi.m (95%) rename VBA_Iphi_UNL.m => core/VBA_Iphi_UNL.m (87%) rename VBA_Iphi_binomial.m => core/VBA_Iphi_binomial.m (94%) create mode 100644 core/VBA_Iphi_extended.m rename VBA_Iphi_split.m => core/VBA_Iphi_split.m (95%) rename VBA_Itheta.m => core/VBA_Itheta.m (94%) rename VBA_VarParam.m => core/VBA_VarParam.m (100%) rename VBA_check.m => core/VBA_check.m (90%) rename VBA_check4DCM.m => core/VBA_check4DCM.m (100%) rename VBA_check_errors.m => core/VBA_check_errors.m (96%) create mode 100644 core/VBA_disp.m rename VBA_evalFun.m => core/VBA_evalFun.m (87%) rename VBA_fillInPriors.m => core/VBA_fillInPriors.m (100%) rename VBA_getDefaults.m => core/VBA_getDefaults.m (96%) rename VBA_getU.m => core/VBA_getU.m (92%) create mode 100644 core/VBA_get_dL.m rename VBA_microTime.m => core/VBA_microTime.m (91%) rename {subfunctions => core}/VBA_multisession_expand.m (98%) rename {subfunctions => core}/VBA_multisession_factor.m (100%) rename VBA_odeLim.m => core/VBA_odeLim.m (100%) rename VBA_odeLim2NLSS.m => core/VBA_odeLim2NLSS.m (100%) rename VBA_onlineWrapper.m => core/VBA_onlineWrapper.m (91%) rename VBA_priors.m => core/VBA_priors.m (96%) rename VBA_wrapup.m => core/VBA_wrapup.m (98%) rename VBA_getDiagnostics.m => core/diagnostics/VBA_getDiagnostics.m (98%) rename VBA_fit.m => core/diagnostics/VBA_getFit.m (65%) rename VBA_getSuffStat.m => core/diagnostics/VBA_getSuffStat.m (96%) rename VBA_VolterraKernels.m => core/diagnostics/VBA_getVolterraKernels.m (88%) rename {stats&plots => core/display}/MoveAxisToOrigin.m (100%) rename {stats&plots => core/display}/Plot3AxisAtOrigin.m (100%) rename {stats&plots => core/display}/PlotAxisAtOrigin.m (100%) rename VBA_Bin2Cont.m => core/display/VBA_Bin2Cont.m (100%) rename VBA_classification_display.m => core/display/VBA_classification_display.m (99%) rename VBA_displayGrads.m => core/display/VBA_displayGrads.m (96%) rename VBA_displayGroupBMC.m => core/display/VBA_displayGroupBMC.m (99%) rename VBA_displayGroupBMCbtw.m => core/display/VBA_displayGroupBMCbtw.m (95%) rename VBA_displayMFX.m => 
core/display/VBA_displayMFX.m (99%) create mode 100644 core/display/VBA_figure.m rename VBA_initDisplay.m => core/display/VBA_initDisplay.m (98%) rename VBA_pause.m => core/display/VBA_pause.m (80%) rename VBA_summary.m => core/display/VBA_summary.m (95%) rename VBA_summaryMFX.m => core/display/VBA_summaryMFX.m (64%) rename {stats&plots => core/display}/VBA_title.m (100%) rename VBA_updateDisplay.m => core/display/VBA_updateDisplay.m (61%) rename {subfunctions => core/display}/displayResults.m (85%) rename {subfunctions => core/display}/displaySimulations.m (97%) rename {stats&plots => core/display}/getColors.m (100%) rename {stats&plots => core/display}/getPanel.m (100%) rename {stats&plots => core/display}/plotDensity.m (95%) rename {stats&plots => core/display}/plotElipse.m (100%) rename {stats&plots => core/display}/plotGraph3D.m (98%) rename {stats&plots => core/display}/plotUncertainTimeSeries.m (64%) rename {stats&plots => core/display}/plotVolterra.m (100%) rename {stats&plots => core/display}/rotateXLabels.m (100%) rename getStateParamInput.m => core/sugar/getStateParamInput.m (100%) rename {subfunctions => core/sugar}/priorPrettifyer.m (100%) rename {subfunctions => core/sugar}/priorUglyfier.m (100%) rename setInput.m => core/sugar/setInput.m (100%) rename setPriors.m => core/sugar/setPriors.m (100%) rename VBA_FreeEnergy_UNL.m => core/unormalizedLikelihood/VBA_FreeEnergy_UNL.m (98%) rename VBA_UNLtemp.m => core/unormalizedLikelihood/VBA_UNLtemp.m (97%) rename VBA_evalAL.m => core/unormalizedLikelihood/VBA_evalAL.m (92%) rename VBA_evalAL2.m => core/unormalizedLikelihood/VBA_evalAL2.m (88%) create mode 100755 demos/0_basics/demo_dynamicalSystem.m create mode 100644 demos/0_basics/demo_excludeData.m create mode 100755 demos/0_basics/demo_modelComparison.m create mode 100644 demos/0_basics/demo_multisession.m create mode 100644 demos/0_basics/demo_sources.m create mode 100644 demos/0_basics/demo_staticModel.m rename {subfunctions => demos/1_advanced}/demo_MCsampling.m (90%) rename {subfunctions => demos/1_advanced}/demo_MFX.m (64%) rename {subfunctions => demos/1_advanced}/demo_RFX.m (96%) create mode 100644 demos/1_advanced/demo_VolterraKernels.m rename {subfunctions => demos/1_advanced}/demo_delays.m (95%) create mode 100644 demos/1_advanced/demo_designOptimization.m create mode 100644 demos/1_advanced/demo_noiseAR1.m create mode 100644 demos/1_advanced/demo_stochasticModel.m rename {subfunctions => demos/1_advanced}/demo_susceptibility.m (97%) rename {subfunctions => demos/1_advanced}/demo_trainTest.m (94%) rename VBA_LinDecomp.m => demos/2_statistics/VBA_LinDecomp.m (95%) rename {subfunctions => demos/2_statistics}/demo_CI.m (92%) rename demos/{statistics => 2_statistics}/demo_GLM.m (100%) rename {stats&plots => demos/2_statistics}/demo_GLM_missingData.m (96%) rename {subfunctions => demos/2_statistics}/demo_KalmanSmoother.m (97%) rename {subfunctions => demos/2_statistics}/demo_LinDecomp.m (100%) rename {stats&plots => demos/2_statistics}/demo_RFT.m (98%) rename {stats&plots => demos/2_statistics}/demo_RFT_GLM.m (98%) rename {subfunctions => demos/2_statistics}/demo_covComp.m (91%) rename {stats&plots => demos/2_statistics}/demo_generalizability.m (96%) rename {subfunctions => demos/2_statistics}/demo_groupbtw.m (86%) rename demos/{statistics => 2_statistics}/demo_logisticRegression.m (93%) rename {stats&plots => demos/2_statistics}/demo_mediation.m (100%) rename {subfunctions => demos/2_statistics}/demo_redundancy.m (97%) create mode 100644 
demos/2_statistics/demo_sparsityPrior.m rename {subfunctions => demos/3_behavioural}/demo_2DChoices.m (90%) rename {subfunctions => demos/3_behavioural}/demo_AVL_recog.m (91%) create mode 100644 demos/3_behavioural/demo_BSL.m rename demos/{behavioural => 3_behavioural}/demo_Qlearning.m (94%) create mode 100644 demos/3_behavioural/demo_QlearningAsymetric.m rename demos/{behavioural => 3_behavioural}/demo_QlearningSimulation.m (94%) rename {subfunctions => demos/3_behavioural}/demo_ToMgames.m (100%) rename {subfunctions => demos/3_behavioural}/demo_VBfree.m (92%) rename {subfunctions => demos/3_behavioural}/demo_asymRW.m (91%) rename {subfunctions => demos/3_behavioural}/demo_discounting.m (93%) rename {subfunctions => demos/3_behavioural}/demo_dynLearningRate.m (91%) rename {subfunctions => demos/3_behavioural}/demo_influenceLearning.m (87%) rename {subfunctions => demos/3_behavioural}/demo_recur.m (93%) rename {subfunctions => demos/3_behavioural}/demo_volatileVB.m (90%) rename {subfunctions => demos/3_behavioural}/demo_wsls.m (92%) rename {subfunctions => demos/4_neural}/compHRFs.m (100%) rename {subfunctions => demos/4_neural}/demo_2DneuralField.m (93%) rename {subfunctions => demos/4_neural}/demo_CaBBI_FHN.m (100%) rename {subfunctions => demos/4_neural}/demo_CaBBI_QGIF.m (100%) rename {subfunctions => demos/4_neural}/demo_FHN.m (93%) rename {subfunctions => demos/4_neural}/demo_HRF.m (97%) rename {subfunctions => demos/4_neural}/demo_HRF_distributed.m (91%) rename {subfunctions => demos/4_neural}/demo_HRF_dummy.m (100%) create mode 100644 demos/4_neural/demo_HodgkinHuxley.m rename {subfunctions => demos/4_neural}/demo_PSP.m (76%) rename {subfunctions => demos/4_neural}/demo_behaviouralDCM.m (94%) rename {subfunctions => demos/4_neural}/demo_dcm4fmri.m (95%) rename {subfunctions => demos/4_neural}/demo_dcm4fmri_distributed.m (96%) rename {subfunctions => demos/4_neural}/demo_dcm_1region.m (95%) rename {subfunctions => demos/4_neural}/demo_dcm_motorPremotor.m (97%) rename {subfunctions => demos/4_neural}/demo_dcmonline.m (89%) rename {subfunctions => demos/4_neural}/demo_fitzhugh.m (94%) rename {subfunctions => demos/4_neural}/demo_lin2D.m (85%) rename {subfunctions => demos/4_neural}/demo_micro2macro.m (97%) rename {subfunctions => demos/4_neural}/demo_negfeedback.m (98%) rename {subfunctions => demos/4_neural}/demo_stability_HRF.m (96%) rename subfunctions/phi.mat => demos/4_neural/demo_stability_hrf.mat (100%) rename {classification/BMM => demos/5_classification}/demo_BMM.m (100%) rename {classification/CRP => demos/5_classification}/demo_DP.m (100%) rename {classification/GMM => demos/5_classification}/demo_GMM.m (100%) rename {subfunctions => demos/5_classification}/demo_Henon.m (78%) rename {subfunctions => demos/5_classification}/demo_bin.m (94%) rename {subfunctions => demos/5_classification}/demo_classification.m (96%) rename {subfunctions => demos/6_physics}/demo_2Dlin.m (89%) rename {subfunctions => demos/6_physics}/demo_LV2D.m (80%) rename {subfunctions => demos/6_physics}/demo_Lorenz.m (88%) rename {subfunctions => demos/6_physics}/demo_Oscillatory.m (89%) rename {subfunctions => demos/6_physics}/demo_Rossler.m (81%) rename {subfunctions => demos/6_physics}/demo_SHC.m (89%) rename {subfunctions => demos/6_physics}/demo_VanDerPol.m (88%) rename {subfunctions => demos/6_physics}/demo_doubleWell.m (72%) rename {subfunctions => demos/7_mathematics}/demo_Elogsig.m (50%) rename {subfunctions => demos/7_mathematics}/demo_gaussian.m (100%) rename {subfunctions => 
demos/7_mathematics}/demo_imageRegistration.m (100%) rename {subfunctions => demos/7_mathematics}/demo_logNormal.m (90%) rename {subfunctions => demos/7_mathematics}/demo_prodsig.m (87%) rename {subfunctions => demos/_models}/ObsRecGen.m (92%) rename {subfunctions => demos/_models}/U_dummy.m (100%) rename {subfunctions => demos/_models}/evolution0bisND.m (90%) rename {subfunctions => demos/_models}/f_2DneuralField.m (100%) rename {subfunctions => demos/_models}/f_2d.m (100%) rename {subfunctions => demos/_models}/f_2dwu.m (100%) rename {subfunctions => demos/_models}/f_AR.m (100%) rename {subfunctions => demos/_models}/f_ARn.m (75%) rename {subfunctions => demos/_models}/f_ARplus.m (100%) rename {subfunctions => demos/_models}/f_AVL.m (95%) rename {subfunctions => demos/_models}/f_BSL.m (92%) rename {subfunctions => demos/_models}/f_BSLinGame.m (97%) rename {subfunctions => demos/_models}/f_CaBBI_FHN.m (100%) rename {subfunctions => demos/_models}/f_CaBBI_QGIF.m (100%) rename {subfunctions => demos/_models}/f_DCMwHRF.m (100%) rename {subfunctions => demos/_models}/f_DCMwHRFext.m (100%) rename {subfunctions => demos/_models}/f_FitzHughNagumo.m (100%) rename {subfunctions => demos/_models}/f_FitzHughNagumo_calcium.m (100%) rename {subfunctions => demos/_models}/f_HGFinGame.m (95%) rename {subfunctions => demos/_models}/f_HH.m (83%) rename {subfunctions => demos/_models}/f_HRF.m (91%) rename {subfunctions => demos/_models}/f_HRF2.m (100%) rename {subfunctions => demos/_models}/f_HRF3.m (100%) rename {subfunctions => demos/_models}/f_Hampton.m (90%) rename {subfunctions => demos/_models}/f_Henon.m (100%) rename f_Id.m => demos/_models/f_Id.m (100%) rename {subfunctions => demos/_models}/f_L1.m (100%) rename {subfunctions => demos/_models}/f_LV2D.m (100%) rename {subfunctions => demos/_models}/f_Lorenz.m (100%) rename {subfunctions => demos/_models}/f_LotkaVolterra.m (100%) rename {subfunctions => demos/_models}/f_OpLearn.m (100%) rename {subfunctions => demos/_models}/f_PSP.m (100%) rename {subfunctions => demos/_models}/f_Qlearn.m (96%) rename {subfunctions => demos/_models}/f_Qlearn2.m (100%) rename {subfunctions => demos/_models}/f_Qlearn_dynLR.m (100%) rename {subfunctions => demos/_models}/f_Qlearn_gammaLR.m (100%) rename {models => demos/_models}/f_Qlearning.m (98%) create mode 100644 demos/_models/f_QlearningAsym.m rename {subfunctions => demos/_models}/f_RLinGame.m (97%) create mode 100644 demos/_models/f_Rossler.m create mode 100644 demos/_models/f_SHC.m rename {subfunctions => demos/_models}/f_VBfree.m (100%) rename {subfunctions => demos/_models}/f_VBvolatile0.m (85%) create mode 100755 demos/_models/f_alpha.m rename {subfunctions => demos/_models}/f_dbw.m (100%) rename {subfunctions => demos/_models}/f_dcm4fmri.m (100%) rename {subfunctions => demos/_models}/f_dcm4fmri0.m (100%) rename {subfunctions => demos/_models}/f_dcm_extension.m (100%) rename {subfunctions => demos/_models}/f_dcm_withU.m (100%) rename {subfunctions => demos/_models}/f_doubleWell.m (100%) rename f_embed.m => demos/_models/f_embed.m (100%) rename {subfunctions => demos/_models}/f_embed0.m (100%) rename {subfunctions => demos/_models}/f_embedAR.m (100%) rename {subfunctions => demos/_models}/f_fullDCM4fmri.m (100%) rename {subfunctions => demos/_models}/f_gen.m (100%) rename {subfunctions => demos/_models}/f_kToM.m (97%) rename {subfunctions => demos/_models}/f_lin1D.m (100%) create mode 100644 demos/_models/f_lin2D.m rename {subfunctions => demos/_models}/f_metaToM.m (88%) rename {subfunctions => 
demos/_models}/f_replicator.m (100%) create mode 100644 demos/_models/f_rwl.m rename {subfunctions => demos/_models}/f_rwl2.m (84%) rename {subfunctions => demos/_models}/f_vanDerPol.m (100%) create mode 100644 demos/_models/f_vgo.m rename {subfunctions => demos/_models}/f_wsls.m (100%) rename {subfunctions => demos/_models}/f_wslsinGame.m (97%) rename {subfunctions => demos/_models}/g_2AFC_basis.m (95%) rename {subfunctions => demos/_models}/g_AVL.m (97%) rename {subfunctions => demos/_models}/g_BSL.m (88%) rename {subfunctions => demos/_models}/g_BSLinGame.m (96%) rename {subfunctions => demos/_models}/g_CaBBI.m (100%) rename {subfunctions => demos/_models}/g_DCMwHRFext.m (100%) rename {subfunctions => demos/_models}/g_DG2.m (100%) rename {subfunctions => demos/_models}/g_DoubleGamma.m (100%) rename {subfunctions => demos/_models}/g_ERP.m (100%) rename {subfunctions => demos/_models}/g_ERP_reduced.m (100%) rename {subfunctions => demos/_models}/g_ExpUtil.m (71%) rename {subfunctions => demos/_models}/g_Fourier.m (100%) rename {subfunctions => demos/_models}/g_GLM.m (100%) rename {subfunctions => demos/_models}/g_GLM4decoding.m (100%) rename {stats&plots => demos/_models}/g_GLM_missingData.m (100%) create mode 100644 demos/_models/g_GLMsparse.m create mode 100644 demos/_models/g_GLMsparseAdapt.m rename {subfunctions => demos/_models}/g_GammaDensity.m (100%) rename {subfunctions => demos/_models}/g_Gaussian.m (100%) rename {subfunctions => demos/_models}/g_HGFinGame.m (93%) rename {subfunctions => demos/_models}/g_HRF3.m (98%) rename {subfunctions => demos/_models}/g_HRF_distributed.m (100%) rename {subfunctions => demos/_models}/g_Hampton.m (91%) create mode 100644 demos/_models/g_Id.m rename {subfunctions => demos/_models}/g_Id_phi.m (100%) rename {subfunctions => demos/_models}/g_LinDecomp.m (85%) rename {subfunctions => demos/_models}/g_NI.m (100%) create mode 100644 demos/_models/g_QLearning.m rename {subfunctions => demos/_models}/g_RFX.m (100%) rename {subfunctions => demos/_models}/g_Udummy.m (100%) rename {subfunctions => demos/_models}/g_VBvolatile0.m (91%) rename {subfunctions => demos/_models}/g_classif.m (100%) rename {subfunctions => demos/_models}/g_classif0.m (93%) rename g_conv0.m => demos/_models/g_conv0.m (93%) rename g_convSig.m => demos/_models/g_convSig.m (98%) rename {subfunctions => demos/_models}/g_convSig_approx.m (97%) rename subfunctions/kernel_sinexp.m => demos/_models/g_conv_approx.m (54%) rename {subfunctions => demos/_models}/g_demo_extended.m (100%) rename {subfunctions => demos/_models}/g_demo_susceptibility.m (100%) rename {subfunctions => demos/_models}/g_discounting.m (87%) rename {subfunctions => demos/_models}/g_dummy.m (100%) rename g_embed.m => demos/_models/g_embed.m (100%) rename {subfunctions => demos/_models}/g_embedAR.m (100%) rename {subfunctions => demos/_models}/g_exp.m (100%) rename {subfunctions => demos/_models}/g_exp2d.m (100%) rename {subfunctions => demos/_models}/g_fullDCM4fmri.m (100%) rename {subfunctions => demos/_models}/g_goNogo.m (100%) rename {subfunctions => demos/_models}/g_ip.m (100%) rename {subfunctions => demos/_models}/g_kToM.m (100%) rename {subfunctions => demos/_models}/g_logistic.m (50%) rename {subfunctions => demos/_models}/g_matmap.m (100%) rename {subfunctions => demos/_models}/g_metaToM.m (96%) rename {subfunctions => demos/_models}/g_mixU.m (100%) rename {subfunctions => demos/_models}/g_nl0.m (100%) rename {subfunctions => demos/_models}/g_odds.m (100%) rename {subfunctions => demos/_models}/g_odds2.m (100%) 
rename {subfunctions => demos/_models}/g_rbf.m (100%) rename {subfunctions => demos/_models}/g_rigid2D.m (100%) rename {subfunctions => demos/_models}/g_sig.m (100%) rename {subfunctions => demos/_models}/g_sig_u.m (53%) create mode 100644 demos/_models/g_sigm.m create mode 100644 demos/_models/g_sigm_binomial.m rename {subfunctions => demos/_models}/g_sigmoid.m (65%) rename {subfunctions => demos/_models}/g_softmax.m (90%) rename {subfunctions => demos/_models}/g_softmax4decoding.m (100%) rename {subfunctions => demos/_models}/g_ttest.m (88%) rename {subfunctions => demos/_models}/g_u2c.m (64%) rename {subfunctions => demos/_models}/g_u2p.m (67%) create mode 100644 demos/_models/g_vgo.m rename {subfunctions => demos/_models}/g_wrap_perseveration.m (90%) rename {subfunctions => demos/_models}/h_Id.m (100%) rename {subfunctions => demos/_models}/h_goNogo.m (100%) rename {subfunctions => demos/_models}/h_randOutcome.m (100%) rename {subfunctions => demos/_models}/h_truefalse.m (100%) rename {subfunctions => demos/_models}/h_whichItem.m (100%) rename {subfunctions => demos/_models}/v_discounting.m (97%) delete mode 100644 factorial_struct.m delete mode 100644 getF.m delete mode 100644 getHyperpriors.m delete mode 100644 getKernels.m delete mode 100644 isbinary.m delete mode 100644 isweird.m create mode 100644 legacy/GaussNewton.m create mode 100644 legacy/README.md create mode 100644 legacy/VBA_getISqrtMat.m rename VBA_groupBMCbtw.m => legacy/VBA_groupBMCbtw.m (100%) create mode 100644 legacy/VBA_sample.m create mode 100644 legacy/checkGX_binomial.m create mode 100644 legacy/cov2corr.m create mode 100644 legacy/empiricalHist.m create mode 100644 legacy/getHyperpriors.m create mode 100644 legacy/getSubplots.m create mode 100644 legacy/get_MCMC_predictiveDensity.m create mode 100644 legacy/get_MCMC_predictiveDensity_fb.m rename {subfunctions => legacy}/invSparsify.m (100%) create mode 100644 legacy/invsigmoid.m create mode 100644 legacy/isbinary.m create mode 100644 legacy/isweird.m create mode 100644 legacy/iswithin.m create mode 100644 legacy/numericDiff.m create mode 100644 legacy/sampleFromArbitraryP.m create mode 100644 legacy/sgm.m create mode 100644 legacy/sig.m create mode 100755 legacy/sigm.m create mode 100644 legacy/sigmoid.m create mode 100644 legacy/simulateNLSS.m create mode 100644 legacy/simulateNLSS_fb.m create mode 100644 legacy/sparseTransform.m create mode 100644 legacy/sparsify.m rename {subfunctions => legacy/trashbin}/ERP_dcm.m (100%) rename {subfunctions => legacy/trashbin}/compare_struct.m (100%) rename {subfunctions => legacy/trashbin}/create2dbf.m (100%) rename {subfunctions => legacy/trashbin}/extractFIR.m (60%) rename {subfunctions => legacy/trashbin}/getFamily.m (100%) rename {subfunctions => legacy/trashbin}/get_ARcov.m (100%) rename {subfunctions => legacy/trashbin}/gridL_binomial.m (97%) rename {stats&plots => legacy/trashbin}/hatch.m (100%) rename {subfunctions => legacy/trashbin}/isHrfStable.m (100%) rename {subfunctions => legacy/trashbin}/logExp.m (100%) rename {stats&plots => legacy/trashbin}/smooth2.m (100%) create mode 100644 legacy/vec.m rename {subfunctions => modules/DCM}/BOLD_parameters.m (100%) rename {subfunctions => modules/DCM}/addConfounds2dcm.m (100%) rename {subfunctions => modules/DCM}/dcm2vba.m (100%) rename {subfunctions => modules/DCM}/defaultHRFparams.m (100%) rename {subfunctions => modules/DCM}/extend_dcm.m (100%) rename {subfunctions => modules/DCM}/getOptions4dcm.m (98%) rename {subfunctions => modules/DCM}/getPriors.m (82%) rename 
{subfunctions => modules/DCM}/get_HRFparams.m (100%) rename {subfunctions => modules/DCM}/get_U_basis.m (96%) rename {subfunctions => modules/DCM}/prepare_dcm.m (100%) rename {subfunctions => modules/DCM}/prepare_fullDCM.m (100%) rename {subfunctions => modules/DCM}/u_Fourier.m (100%) rename {subfunctions => modules/DCM}/u_FourierComplete.m (100%) rename {subfunctions => modules/DCM}/u_GaussianBumps.m (100%) rename {subfunctions => modules/DCM}/u_RBF.m (100%) rename {subfunctions => modules/DCM}/vba2dcm.m (99%) rename {stats&plots => modules/GLM}/Contrast_MEbins.m (100%) rename {stats&plots => modules/GLM}/GLM_contrast.m (97%) rename {stats&plots => modules/GLM}/GLM_covComp.m (100%) rename {stats&plots => modules/GLM}/GLM_tolerance.m (100%) rename {stats&plots => modules/GLM}/PRESS_GLM.m (100%) rename {stats&plots => modules/GLM}/SEM_analysis0.m (98%) rename {stats&plots => modules/GLM}/lev_GLM.m (100%) rename {stats&plots => modules/GLM}/mediationAnalysis0.m (98%) rename {stats&plots => modules/GLM}/mediation_contrast.m (100%) rename {subfunctions => modules/OTO}/VarVolatility.m (100%) rename {subfunctions => modules/OTO}/expBinom.m (88%) rename {subfunctions => modules/OTO}/getPosteriorfromOTO.m (91%) rename {subfunctions => modules/OTO}/unwrapVBvolatileOTO.m (85%) rename {stats&plots => modules/classical_statistics}/bayesian_ttest.m (95%) rename {stats&plots => modules/classical_statistics}/doROC.m (96%) rename {classification => modules/classification}/BMM/MixtureOfBinomials.m (100%) rename {classification => modules/classification}/BMM/generateBMM.m (86%) rename {classification => modules/classification}/CRP/VB_CRP.m (100%) rename {classification => modules/classification}/CRP/simulate_CRP.m (87%) rename {classification => modules/classification}/GMM/PCA_MoG.m (100%) rename {classification => modules/classification}/GMM/VBA_MoG.m (95%) rename {classification => modules/classification}/GMM/VBA_projectMoG.m (99%) rename {classification => modules/classification}/GMM/VBEM_GM.m (99%) rename {classification => modules/classification}/GMM/dist.m (100%) rename {classification => modules/classification}/GMM/generateGMM.m (100%) rename {classification => modules/classification}/GMM/plotResults.m (100%) rename VBA_classification.m => modules/classification/VBA_classification.m (97%) rename {stats&plots => modules/random_field_theory}/RFT_Euler.m (100%) rename {stats&plots => modules/random_field_theory}/RFT_GLM_contrast.m (99%) rename {stats&plots => modules/random_field_theory}/RFT_Gtf.m (100%) rename {stats&plots => modules/random_field_theory}/RFT_Pval.m (100%) rename {stats&plots => modules/random_field_theory}/RFT_ReDisplay.m (99%) rename {stats&plots => modules/random_field_theory}/RFT_clusters.m (95%) rename {stats&plots => modules/random_field_theory}/RFT_expectedTopo.m (98%) rename {stats&plots => modules/random_field_theory}/RFT_localmax.m (97%) rename {stats&plots => modules/random_field_theory}/RFT_main.m (98%) rename {stats&plots => modules/random_field_theory}/RFT_rescaling.m (100%) rename {stats&plots => modules/random_field_theory}/RFT_smoothness.m (100%) rename {subfunctions => modules/theory_of_mind}/RecToMfunction.m (93%) rename {subfunctions => modules/theory_of_mind}/defIndlev.m (100%) rename {subfunctions => modules/theory_of_mind}/extractKernels.m (100%) rename {subfunctions => modules/theory_of_mind}/fplayer.m (100%) rename {subfunctions => modules/theory_of_mind}/get_VolterraInGames.m (96%) rename {subfunctions => modules/theory_of_mind}/prepare_agent.m (92%) rename 
{subfunctions => modules/theory_of_mind}/prepare_kToM.m (100%) rename {subfunctions => modules/theory_of_mind}/prepare_metaToM.m (98%) rename {subfunctions => modules/theory_of_mind}/runGame_2players.m (71%) rename {subfunctions => modules/theory_of_mind}/script_test_rec.m (86%) rename {subfunctions => modules/theory_of_mind}/sizeXrec.m (100%) rename {subfunctions => modules/theory_of_mind}/unwrapKTOM.m (93%) delete mode 100644 numericDiff.m delete mode 100644 sampleFromArbitraryP.m rename VBA_UNL0.m => sandbox/VBA_UNL0.m (99%) rename {subfunctions => sandbox}/demo_bayesian_ttest.m (81%) rename {subfunctions => sandbox}/demo_dummyUNL.m (100%) delete mode 100644 simulateNLSS_fb.m delete mode 100644 stats&plots/addBestLinearPredictor.m delete mode 100644 stats&plots/approxOnGrid.m delete mode 100644 stats&plots/displayUncertainSigmoid.m delete mode 100644 stats&plots/empiricalHist.m delete mode 100644 stats&plots/findCI.m delete mode 100644 stats&plots/fisher.m delete mode 100644 stats&plots/maxMat.m delete mode 100644 stats&plots/medianfilter0.m delete mode 100644 stats&plots/mnan.m delete mode 100644 stats&plots/nanzscore.m delete mode 100644 stats&plots/normalize.m delete mode 100644 stats&plots/pinvComplex.m delete mode 100644 stats&plots/removeOutliers.m delete mode 100644 stats&plots/snan.m delete mode 100644 stats&plots/sparsify.m delete mode 100644 stats&plots/testPower.m delete mode 100644 subfunctions/Av.m delete mode 100644 subfunctions/Contrast_MEbins.m delete mode 100644 subfunctions/GetGitPath.m delete mode 100644 subfunctions/SSE.m delete mode 100644 subfunctions/VBA_bernoulli.m delete mode 100644 subfunctions/balanced_accuracy.m delete mode 100644 subfunctions/checkF.m delete mode 100644 subfunctions/check_constrasts.m delete mode 100644 subfunctions/dMatdvec.m delete mode 100644 subfunctions/demo_AR1.m delete mode 100644 subfunctions/demo_BSL.m delete mode 100644 subfunctions/demo_HH.m delete mode 100644 subfunctions/demo_HodgkinHuxley.m delete mode 100755 subfunctions/demo_binomial.m delete mode 100644 subfunctions/demo_binomial_AdaptDesign.m delete mode 100755 subfunctions/demo_binomial_adapt.m delete mode 100755 subfunctions/demo_bmc4glm.m delete mode 100755 subfunctions/demo_dynsys.m delete mode 100644 subfunctions/demo_getConvKernel.m delete mode 100644 subfunctions/demo_irregular.m delete mode 100644 subfunctions/demo_linear.m delete mode 100644 subfunctions/demo_multisession.m delete mode 100644 subfunctions/demo_multisource.m delete mode 100644 subfunctions/demo_sparsePriors.m delete mode 100644 subfunctions/demo_sparsify.m delete mode 100644 subfunctions/demo_stochasticBinomial.m delete mode 100644 subfunctions/dsdv.m delete mode 100644 subfunctions/elvis.m delete mode 100644 subfunctions/f_Rossler.m delete mode 100644 subfunctions/f_SHC.m delete mode 100755 subfunctions/f_alpha.m delete mode 100644 subfunctions/f_embed_old.m delete mode 100644 subfunctions/f_lin2D.m delete mode 100644 subfunctions/f_rwl.m delete mode 100644 subfunctions/f_try.m delete mode 100644 subfunctions/f_vgo.m delete mode 100644 subfunctions/g_GLMsparse.m delete mode 100644 subfunctions/g_GLMsparse2.m delete mode 100644 subfunctions/g_Id.m delete mode 100644 subfunctions/g_conv_approx.m delete mode 100644 subfunctions/g_probit.m delete mode 100644 subfunctions/g_sigm.m delete mode 100644 subfunctions/g_sigm_binomial.m delete mode 100644 subfunctions/g_sqrtSig.m delete mode 100644 subfunctions/g_vgo.m delete mode 100644 subfunctions/get_sigmoidMoments.m delete mode 100644 
subfunctions/invsigmoid.m delete mode 100644 subfunctions/iswithin.m delete mode 100644 subfunctions/medianfilter0.m delete mode 100644 subfunctions/multi2num.m delete mode 100644 subfunctions/multinomial_accuracy.m delete mode 100644 subfunctions/nanzscore.m delete mode 100644 subfunctions/num2multi.m delete mode 100644 subfunctions/parseargs.m delete mode 100644 subfunctions/sgm.m delete mode 100644 subfunctions/show_potential.m delete mode 100644 subfunctions/sig.m delete mode 100755 subfunctions/sigm.m delete mode 100644 subfunctions/sigmoid.m delete mode 100644 subfunctions/softmax.m delete mode 100644 subfunctions/softmaxMat.m delete mode 100644 subfunctions/sparseTransform.m delete mode 100644 subfunctions/spear.m delete mode 100644 subfunctions/unwrapKBSL.m delete mode 100644 subfunctions/util.m create mode 100644 tests/VBA_test.m create mode 100644 tests/demos/TestAllDemos.m create mode 100644 tests/utils/test_VBA_r2.m create mode 100644 tests/utils/test_VBA_random.m create mode 100644 tests/utils/test_VBA_sigmoid.m create mode 100644 tests/utils/test_VBA_sqrtm.m rename {subfunctions => thrid-party}/getGitInfo.m (100%) rename {stats&plots => thrid-party}/hist2.m (100%) rename {subfunctions => thrid-party}/nanfft.m (100%) rename {stats&plots => thrid-party}/spear.m (100%) mode change 100644 => 100755 thrid-party/spm/VBA_spm_gamrnd.mexa64 delete mode 100644 thrid-party/spm/VBA_spm_gamrnd.mexglx delete mode 100644 thrid-party/spm/VBA_spm_gamrnd.mexmaci mode change 100644 => 100755 thrid-party/spm/VBA_spm_gamrnd.mexmaci64 mode change 100644 => 100755 thrid-party/spm/VBA_spm_gamrnd.mexw32 mode change 100644 => 100755 thrid-party/spm/VBA_spm_gamrnd.mexw64 rename stats&plots/EVprodX.m => utils/VBA_EVprodX.m (92%) rename VBA_ElogBeta.m => utils/VBA_ElogBeta.m (91%) rename subfunctions/Elogsig.m => utils/VBA_Elogsig.m (54%) rename VBA_ExceedanceProb.m => utils/VBA_ExceedanceProb.m (53%) rename subfunctions/Fourier2DBF.m => utils/VBA_Fourier2DBF.m (97%) rename stats&plots/FtoR2.m => utils/VBA_FtoR2.m (60%) rename subfunctions/GaussNewton.m => utils/VBA_GaussNewton.m (97%) create mode 100644 utils/VBA_JensenShannon.m rename VBA_KL.m => utils/VBA_KL.m (93%) rename VBA_PP0.m => utils/VBA_PP0.m (93%) rename VBA_PPM.m => utils/VBA_PPM.m (98%) rename VBA_SavageDickey.m => utils/VBA_SavageDickey.m (95%) rename VBA_Shapley.m => utils/VBA_Shapley.m (97%) create mode 100644 utils/VBA_abs.m create mode 100644 utils/VBA_accuracy.m rename VBA_bDCM_lesion.m => utils/VBA_bDCM_lesion.m (96%) rename VBA_binomial.m => utils/VBA_binomial.m (100%) rename VBA_checkGN.m => utils/VBA_checkGN.m (100%) rename VBA_check_struct.m => utils/VBA_check_struct.m (100%) rename subfunctions/comparePredictions.m => utils/VBA_comparePredictions.m (88%) rename VBA_conv2glm.m => utils/VBA_conv2glm.m (100%) rename stats&plots/cov2corr.m => utils/VBA_cov2corr.m (56%) rename VBA_dcmMatrices.m => utils/VBA_dcmMatrices.m (100%) create mode 100644 utils/VBA_dirichlet_moments.m create mode 100644 utils/VBA_empiricalDensity.m create mode 100644 utils/VBA_factorial_struct.m create mode 100644 utils/VBA_finiteBinomial.m create mode 100644 utils/VBA_fisher.m rename VBA_getNtuples.m => utils/VBA_getNtuples.m (100%) rename getSubplots.m => utils/VBA_getSubplots.m (66%) rename VBA_getVar.m => utils/VBA_getVar.m (100%) create mode 100644 utils/VBA_guessHyperpriors.m create mode 100644 utils/VBA_indicator.m rename VBA_inv.m => utils/VBA_inv.m (100%) create mode 100644 utils/VBA_invComplex.m create mode 100644 utils/VBA_isBinary.m create mode 
100644 utils/VBA_isInRange.m create mode 100644 utils/VBA_isWeird.m create mode 100644 utils/VBA_issymmetric.m rename {stats&plots => utils}/VBA_kstest.m (100%) rename VBA_logDet.m => utils/VBA_logDet.m (96%) create mode 100644 utils/VBA_logSumExp.m create mode 100644 utils/VBA_maxMat.m rename {subfunctions => utils}/VBA_nanmean.m (93%) rename {subfunctions => utils}/VBA_nanstd.m (100%) rename {subfunctions => utils}/VBA_nanvar.m (100%) rename {stats&plots => utils}/VBA_ncfcdf.m (100%) create mode 100644 utils/VBA_numericDiff.m rename subfunctions/optimCost.m => utils/VBA_optimCost.m (85%) rename VBA_orth.m => utils/VBA_orth.m (100%) create mode 100644 utils/VBA_power.m rename stats&plots/prodX.m => utils/VBA_prodX.m (75%) rename VBA_psi.m => utils/VBA_psi.m (100%) rename {stats&plots => utils}/VBA_quantile.m (97%) create mode 100644 utils/VBA_r2.m create mode 100644 utils/VBA_random.m create mode 100644 utils/VBA_removeOutliers.m create mode 100644 utils/VBA_sigmoid.m create mode 100644 utils/VBA_sign.m create mode 100644 utils/VBA_softmax.m create mode 100644 utils/VBA_sparsifyPrior.m create mode 100644 utils/VBA_sqrtm.m rename VBA_susceptibility.m => utils/VBA_susceptibility.m (99%) create mode 100644 utils/VBA_vec.m delete mode 100644 vec.m diff --git a/README.md b/README.md index bd9358dc..6ae6c9b6 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,69 @@ ## What is the VBA toolbox? -Most models of neurobiological and behavioural data can be broken down into processes that evolve over time and static observation mappings. Given these evolution and observation mappings, the toolbox can be used to simulate data, perform statistical data analysis, optimize the experimental design, etc... In brief, the toolbox provides: +Most models of neurobiological and behavioral data can be broken down into processes that evolve over time and static observation mappings. Given these evolution and observation mappings, the toolbox can be used to simulate data, perform statistical data analysis, optimize the experimental design, etc... In brief, the toolbox provides: * plug-and-play tools for classical statistical tests -* a library of computational models of behavioural and neurobiological data time series +* a library of computational models of behavioral and neurobiological data time series * quick and efficient probabilistic inference techniques for parameter estimation and model comparison (+ experimental design optimization) -* graphical visualization of results (+ advanced diagnostics of model inversion) - +* graphical visualization of results (+ advanced diagnostics of model inversion) + ## Requirements -This toolbox runs in Matlab. + +This toolbox runs in Matlab. Although it should run in all versions of Matlab, the toolbox has only been extensively tested on Matlab 2013 and higher. ## How do I install the toolbox? -1. Get the toolbox - - [fork](https://github.com/MBB-team/VBA-toolbox/fork) this repo and clone it on your computer ([help!](https://help.github.com/articles/fork-a-repo)) - - download and unzip the ~~[latest stable release]()~~ or the [current beta version](https://github.com/MBB-team/VBA-toolbox/archive/master.zip) -1. 
Add the toolbox folder to your Matlab path: + +#### Get the toolbox + +- Ideally, use [Git](https://git-scm.com/) to [clone](https://github.com/MBB-team/VBA-toolbox/clone) the [repo of the toolbox](https://github.com/MBB-team/VBA-toolbox) on your computer: + + ```bash + cd ~/path/to/parentDirectory + git clone https://github.com/MBB-team/VBA-toolbox.git + ``` + You will then be able to stay up to date with the latest versions using the command: + + ```bash + cd ~/path/to/parentDirectory/VBA-toolbox + git pull + ``` + +- If you don't want to install Git, you can alternatively download a zip of the [latest stable release](https://github.com/MBB-team/VBA-toolbox/archive/master.zip) + +#### Add the toolbox folder to your Matlab path: + ```matlab -cd path_to_/your/VBA_folder -VBA_setup +cd ~/path/to/parentDirectory/VBA-toolbox +VBA_setup() ``` -1. Enjoy! - + +Note that you might have to run `VBA_setup()` after an update of the toolbox (eg. after a `git pull`). + +#### Enjoy! + +You can now try one of the demos or tutorials you can find in the `VBA-toolbox/demos` folder. If you have a recent version of Matlab (>= 2017), you can also run `VBA_test()` to check that everything works as intended on your system. + +## Structure of the toolbox + +- `/` contains all the functions you can use directly in your scripts to call general routines like model simulation and inversion. +- `/core` contains the sub-functions that implement the internal algorithms of the toolbox, like the variational estimation scheme. You should not use those functions directly! +- `/demos` contains a large selection of computational models (ie. evolution and observation functions) you can use directly or adapt to your hypothesis. You will also find in this folder a series of demos that implement those models and tutorials demonstrating the various features of the toolbox. +- `/legacy` contains some old code that will soon disappear but we keep for backward compatibility. +- `/modules` contains a set of tools that complement the toolbox, like DCM generators or advanced models and scripts used in publications. +- `/sandbox` contains code in development that is not yet fully functional and / or tested. Feel free to test them, or wait a bit until they move to the core. +- `/tests` contains unit testing functions. This code helps us ensure that the toolbox does what we want it to do... +- `/third-party` contains code we did not write ourselves but that is needed by the toolbox. +- `/utils` contains plenty of cool tools that you can use directly if you need, like random number generators, mathematical measures (eg. KL divergence), or nifty numerical tricks. ## Want more details? -Please visit the [wiki pages](http://mbb-team.github.io/VBA-toolbox/wiki/) for tutorials, demos and advanced features descriptions. + +Please visit the [wiki pages](http://mbb-team.github.io/VBA-toolbox/wiki/) for tutorials, demos, and advanced features descriptions. + +You can also seek help on our [dedicated forum](http://mbb-team.github.io/VBA-toolbox/forum/). We will always be happy to help you with the toolbox if you need it. ## How can I participate? -This is a collaborative project, in that users can contribute to the toolbox either through [feedback](https://github.com/MBB-team/VBA-toolbox/issues), or directly by including new models for neurobiological and behavioural data. Critically, the toolbox has been developped in the aim of facilitating the inclusion of new models (without having to care about their statistical treatment).
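As a quick sanity check after the installation steps described in the README above, a minimal Matlab session might look like the following sketch. The path placeholder is the same one used in the README, and `demo_Qlearning` is the demo suggested by `VBA_setup`; adapt both to your own setup.

```matlab
% Minimal post-install check (paths are placeholders, adapt to your machine)
cd ~/path/to/parentDirectory/VBA-toolbox
VBA_setup();      % add the toolbox folders to the Matlab path
VBA_version()     % display the installed branch/commit
demo_Qlearning;   % run a first demo from the demos folder
% VBA_test();     % optional, Matlab >= 2017: run the unit test suite
```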
-Please send us your code or a [pull request](https://github.com/MBB-team/VBA-toolbox/pulls) so we can include your models in the next release! +The VBA-toolbox is an open-source, collaborative project. +We gladly welcome contributions at all levels: +- flag a bug or request a feature by creating a [new issue](https://github.com/MBB-team/VBA-toolbox/issues) on Github +- provide your model to other users by integrating it directly in the toolbox. Send us an email, or directly create a pull request. diff --git a/VBA_BMA.m b/VBA_BMA.m index 36b8867d..f1f2a964 100644 --- a/VBA_BMA.m +++ b/VBA_BMA.m @@ -1,111 +1,155 @@ -function [p_BMA] = VBA_BMA(p0,F0) +function [p_BMA] = VBA_BMA (p0, F0) +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [p_BMA] = VBA_BMA (p0, F0) % performs Bayesian Model Averaging (BMA) -% function [p,o] = VBA_BMA(posterior,F) +% % IN: -% - p0: a Kx1 cell-array of VBA posterior structures, which are -% conditional onto specific generative models (where K is the number of -% models) -% - F0: a Kx1 vector of log-model evidences +% - p0: a Kx1 array of VBA posterior structures, which are +% conditional onto specific generative models +% - F0: a Kx1 vector of the respective log-model evidences % OUT: -% - p_BMA: the resulting posterior structure, with the first two moments of -% the marginal probability density functions +% - p_BMA: the resulting posterior structure that describe the marginal +% (over models) probability density functions +% +% ///////////////////////////////////////////////////////////////////////// -K = length(p0); % # models -ps = softmax(F0); +% for retrocompatibility, accept cell array of posterior +if iscell (p0) + p0 = cell2mat (p0); +end + +% shortcuts +% ========================================================================= +% number of models +K = length (p0); + +% posterior model probabilities +% ========================================================================= +ps = VBA_softmax (F0); + +% perform averaging +% ========================================================================= % observation parameters -mus = cell(K,1); -Qs = cell(K,1); -for k=1:K - mus{k} = p0{k}.muPhi; - Qs{k} = p0{k}.SigmaPhi; +% ------------------------------------------------------------------------- +try + [p_BMA.muPhi, p_BMA.SigmaPhi] = averageMoments ({p0.muPhi}, {p0.SigmaPhi}, ps); end -[p_BMA.muPhi,p_BMA.SigmaPhi] = get2moments(mus,Qs,ps); % evolution parameters -mus = cell(K,1); -Qs = cell(K,1); -for k=1:K - mus{k} = p0{k}.muTheta; - Qs{k} = p0{k}.SigmaTheta; +% ------------------------------------------------------------------------- +try + [p_BMA.muTheta, p_BMA.SigmaTheta] = averageMoments ({p0.muTheta}, {p0.SigmaTheta}, ps); end -[p_BMA.muTheta,p_BMA.SigmaTheta] = get2moments(mus,Qs,ps); % initial conditions -mus = cell(K,1); -Qs = cell(K,1); -for k=1:K - mus{k} = p0{k}.muX0; - Qs{k} = p0{k}.SigmaX0; +% ------------------------------------------------------------------------- +try + [p_BMA.muX0, p_BMA.SigmaX0] = averageMoments ({p0.muX0}, {p0.SigmaX0}, ps); end -[p_BMA.muX0,p_BMA.SigmaX0] = get2moments(mus,Qs,ps); + % hidden states -T = size(p0{1}.muX,2); +% ------------------------------------------------------------------------- try -for t=1:T - mus = cell(K,1); - Qs = cell(K,1); - for k=1:K - mus{k} = p0{k}.muX(:,t); - Qs{k} = p0{k}.SigmaX.current{t}; + % number of timepoints + T = size (p0(1).muX, 2); + % initialisation + mus = cell (K, 1); + Qs = cell (K, 1); + % loop over timepoints + for t = 1 : T + % collect 
moments + for k=1:K + mus{k} = p0(k).muX(:, t); + Qs{k} = p0(k).SigmaX.current{t}; + end + % compute average + [p_BMA.muX(:, t), p_BMA.SigmaX.current{t}] = averageMoments (mus, Qs, ps); end - [p_BMA.muX(:,t),p_BMA.SigmaX.current{t}] = get2moments(mus,Qs,ps); -end end -% data precision -mus = cell(K,1); -Qs = cell(K,1); -for iS=1:numel(p0{1}.a_sigma) % loop over sources - for k=1:K - mus{k} = p0{k}.a_sigma(iS)/p0{k}.b_sigma(iS); - Qs{k} = p0{k}.a_sigma(iS)/p0{k}.b_sigma(iS)^2; +% observation precision +% ------------------------------------------------------------------------- +try + % number of gaussian sources + nS = numel (p0(1).a_sigma); + % initialisation + mus = cell (K, 1); + Qs = cell (K, 1); + % loop over sources + for iS = 1 : nS + % collect moments + for k = 1 : K + mus{k} = p0(k).a_sigma(iS) / p0(k).b_sigma(iS); + Qs{k} = p0(k).a_sigma(iS) / p0(k).b_sigma(iS) ^ 2; + end + % compute average + [m, v] = averageMoments (mus, Qs, ps); + % map to gamma distribution parameters + p_BMA.b_sigma(iS) = m / v; + p_BMA.a_sigma(iS) = m * p_BMA.b_sigma(iS); end - [m,v] = get2moments(mus,Qs,ps); - p_BMA.b_sigma(iS) = m/v; - p_BMA.a_sigma(iS) = m*p_BMA.b_sigma(iS); end % hidden state precision -mus = cell(K,1); -Qs = cell(K,1); -id = zeros(K,1); - -for k=1:K - mus{k} = p0{k}.a_alpha/p0{k}.b_alpha; - Qs{k} = p0{k}.a_alpha/p0{k}.b_alpha^2; - if (isempty(p0{k}.a_alpha) && isempty(p0{k}.b_alpha)) || (isinf(p0{k}.a_alpha) && p0{k}.b_alpha==0) - id(k) = 1; +% ------------------------------------------------------------------------- +% initialisation +mus = cell (K, 1); +Qs = cell (K, 1); +isStochastic = nan (K, 1); +% collect moments, if any +for k = 1 : K + try + mus{k} = p0(k).a_alpha / p0(k).b_alpha; + Qs{k} = p0(k).a_alpha / p0(k).b_alpha ^ 2; + isStochastic(k) = ~ isempty(mus{k}) && ~ isinf(mus{k}); + catch + isStochastic(k) = false; end end - -if isequal(sum(id),K) % all deterministic systems +% compute average, if meaninful +% + all deterministic systems +if ~ any (isStochastic) p_BMA.b_alpha = Inf; p_BMA.a_alpha = 0; -elseif isequal(sum(id),0) % all stochastic systems - [m,v] = get2moments(mus,Qs,ps); - p_BMA.b_alpha = m/v; - p_BMA.a_alpha = m*p_BMA.b_alpha; +% + all stochastic systems +elseif all (isStochastic) + % average moments + [m, v] = averageMoments(mus, Qs, ps); + % map to gamma distribution parameters + p_BMA.b_alpha = m / v; + p_BMA.a_alpha = m * p_BMA.b_alpha; else - disp('VBA_MBA: Warning: mixture of deterministic and stochastic models!') - p_BMA.b_alpha = Inf; - p_BMA.a_alpha = 0; +% + mixture of stochastic and deterministic systems + disp('VBA_MBA: Warning! mixture of deterministic and stochastic models!') + p_BMA.b_alpha = NaN; + p_BMA.a_alpha = NaN; end +end +% ######################################################################### +% Subfunctions +% ######################################################################### - -function [m,V] = get2moments(mus,Qs,ps) +% Compute averages of 1st order (mus) and 2nd order (Qs) moments of +% distributions, weigthed by ps. 
+function [m, V] = averageMoments (mus, Qs, ps) +% initialisation V = zeros(size(Qs{1})); m = zeros(size(mus{1})); K = length(ps); -for k=1:K - m = m + ps(k).*mus{k}; +% average 1st order moments +for k = 1 : K + m = m + ps(k) .* mus{k}; end -for k=1:K +% average 2nd order moments +for k = 1 : K tmp = mus{k} - m; - V = V + ps(k).*(tmp*tmp' + Qs{k}); + V = V + ps(k) .* (tmp * tmp' + Qs{k}); +end end \ No newline at end of file diff --git a/VBA_EKF.m b/VBA_EKF.m index 986b329d..44dcf3e7 100644 --- a/VBA_EKF.m +++ b/VBA_EKF.m @@ -37,6 +37,14 @@ % - SigmaX: covariance matrices of the variational posterior pdf of % the dynamic hidden-states. +if numel(options.sources) > 1 + error('*** EKF is not yet compatible with multisource observations'); +end + +if options.sources.type > 1 + error('*** EKF is not yet compatible with multinomial observations'); +end + % By default, this function implements an EKF: if ~exist('flag','var') || isempty(flag) flag = 1; @@ -101,7 +109,7 @@ case {1,2} try alpha = posterior.a_alpha(end)./posterior.b_alpha(end); - if ~options.binomial + if options.sources.type == 0 sigma = posterior.a_sigma(end)./posterior.b_sigma(end); end catch @@ -129,7 +137,7 @@ %--- Prediction [fx0,dF_dX0] = VBA_evalFun('f',X0,theta,u(:,1),options,dim,1); mStar(:,1) = fx0; - Rp = dF_dX0'*SigmaX0*dF_dX0 + 1./alpha.*VBA_inv(iQx{1},[]); + Rp = dF_dX0'*SigmaX0*dF_dX0 + 1./alpha.*VBA_inv(iQx{1}); if flag == 1 % EKF update [gx(:,1),dG_dX] = VBA_evalFun('g',mStar(:,1),phi,u(:,1),options,dim,1); iRp = pinv(Rp); @@ -144,7 +152,7 @@ % get predicted observation at the mode [gx(:,1),dG_dX] = VBA_evalFun('g',muX(:,1),phi,u(:,1),options,dim,1); suffStat.dy(:,1) = y(:,1) - gx(:,1); - if ~options.binomial + if options.sources.type == 0 suffStat.vy(:,1) = diag( sigma.^-1.*pinv(iQy{1}) + dG_dX'*SigmaX{1}*dG_dX ); suffStat.dy2 = suffStat.dy2 + suffStat.dy(:,1)'*iQy{1}*suffStat.dy(:,1); else @@ -167,7 +175,7 @@ %-- Prediction [fx,dF_dX] = VBA_evalFun('f',muX(:,t),theta,u(:,t+1),options,dim,t+1); mStar(:,t+1) = fx; - Rp = dF_dX'*SigmaX{t}*dF_dX + 1./alpha.*VBA_inv(iQx{t+1},[]); + Rp = dF_dX'*SigmaX{t}*dF_dX + 1./alpha.*VBA_inv(iQx{t+1}); if flag == 1 % EKF update [gx(:,t+1),dG_dX] = VBA_evalFun('g',mStar(:,t+1),phi,u(:,t+1),options,dim,t+1); C = dG_dX*iQy{t+1}*dG_dX'; @@ -182,7 +190,7 @@ % get predicted observation at the mode [gx(:,t+1),dG_dX] = VBA_evalFun('g',muX(:,t+1),phi,u(:,t+1),options,dim,t+1); suffStat.dy(:,t+1) = y(:,t+1) - gx(:,t+1); - if ~options.binomial + if options.sources.type == 0 suffStat.vy(:,t+1) = diag( sigma.^-1.*pinv(iQy{t+1}) + dG_dX'*SigmaX{t+1}*dG_dX ); suffStat.dy2 = suffStat.dy2 + suffStat.dy(:,t+1)'*iQy{t+1}*suffStat.dy(:,t+1); else diff --git a/VBA_Iphi_extended.m b/VBA_Iphi_extended.m index adfe5fd5..8436a920 100644 --- a/VBA_Iphi_extended.m +++ b/VBA_Iphi_extended.m @@ -46,7 +46,7 @@ end div = 0; -isTout = sum(options.isYout,1)==size(options.isYout,1); +isTout = all (options.isYout, 1); %--- Loop over time series ---% for t=1:dim.n_t diff --git a/VBA_JensenShannon.m b/VBA_JensenShannon.m deleted file mode 100644 index a047790e..00000000 --- a/VBA_JensenShannon.m +++ /dev/null @@ -1,82 +0,0 @@ -function [DJS,b,muy,Vy] = VBA_JensenShannon(mus,Qs,ps,binomial,base) -% evaluates the Jensen-Shannon divergence (DJS) -% function [DJS,b] = VBA_JensenShannon(mus,Qs,ps,binomial) -% This function evaluates the DJS: -% - either from a set of N-D Gaussian densities, in which case those are -% defined through their first- and second-order moments, -% - or binomial densities, in which case the 
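For readers unfamiliar with moment matching, the `averageMoments` subfunction shown above implements the standard mixture formulas m = sum_k p_k * mu_k and V = sum_k p_k * [(mu_k - m)(mu_k - m)' + Q_k]. A small self-contained sketch with toy (hypothetical) numbers:

```matlab
% Toy illustration of the moment-matching rule used by averageMoments
mus = {[0; 0], [2; 1]};          % component means (hypothetical)
Qs  = {eye(2), 0.5 * eye(2)};    % component covariances (hypothetical)
ps  = [0.7; 0.3];                % model probabilities, sum to 1
m = ps(1) * mus{1} + ps(2) * mus{2};
V = ps(1) * ((mus{1} - m) * (mus{1} - m)' + Qs{1}) ...
  + ps(2) * ((mus{2} - m) * (mus{2} - m)' + Qs{2});
```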
first-order moments are -% sufficient. -% In addition, a set of individual weights for each of the n component -% densities should be provided. -% IN: -% - mus: nx1 cell array of 1st-order moments -% - Qs: nx1 cell array of 2nd-order moments -% - ps: nx1 vector of weights -% - binomial: flag for binomial densities {0} -% - base: base for the log mapping (can be '2' or '10') {'10'} -% OUT: -% - DJS: the Jensen-Shannon divergence -% - b: the associated lower-bound on the ensuing probability of -% classicafication error -% - muy/Vy: the 1st- and 2d- order moments of the Laplace approx to the -% mixture -% SEE ALSO: ProbError, ProbErrorND - -try,binomial;catch,binomial=0;end -try,base;catch,base='2';end - -n = length(mus); % number of models to be compared -if ~binomial - Vy = zeros(size(Qs{1})); -end -muy = zeros(size(mus{1})); -sH = 0; - -for i=1:n - % get first order moment of mixture - muy = muy + ps(i).*mus{i}; - % get weighted sum of entropies - if binomial - sH = sH -sum(mus{i}.*LOG(mus{i},base)) -sum((1-mus{i}).*LOG((1-mus{i}),base)); - else - [e] = eig(full(Qs{i})); - logDet = sum(LOG(e,base)); - sH = sH + 0.5*ps(i).*logDet; - end -end - -% get mixture entropy -if binomial - Hy = -sum(muy.*LOG(muy,base)) -sum((1-muy).*LOG((1-muy),base)); -else - % get second order moment of sum of densities - for i=1:n - tmp = mus{i} - muy; - tmp = tmp*tmp' + Qs{i}; - Vy = Vy + ps(i).*tmp; - end - % get Gaussian approx entropy - [e] = eig(full(Vy)); - Hy = 0.5*sum(LOG(e,base)); -end - -% get Jensen-Shannon approximation -DJS = Hy - sH; - -% get error probability upper bound -Hp = -sum(ps.*LOG(ps,base)); -b = max([-Inf,Hp - DJS]); - - -function lo = LOG(x,base) -switch base - case '2' - lo = log2(x); - case '10' - lo = log(x); -end - - - - - \ No newline at end of file diff --git a/VBA_LMEH0.m b/VBA_LMEH0.m index 4ad328c8..62c281d7 100644 --- a/VBA_LMEH0.m +++ b/VBA_LMEH0.m @@ -21,7 +21,7 @@ y_i = options.sources(s_g(s)).out ; y_s = y(y_i,:); y_s = y_s(options.isYout(y_i,:)==0); - LLH0 = LLH0 + lev_GLM(vec(y_s),ones(numel(y_s),1)); + LLH0 = LLH0 + lev_GLM(VBA_vec(y_s),ones(numel(y_s),1)); % try % a0 = options.priors.a_sigma(s); % b0 = options.priors.b_sigma(s); diff --git a/get_MCMC_predictiveDensity.m b/VBA_MCMC_predictiveDensity.m similarity index 87% rename from get_MCMC_predictiveDensity.m rename to VBA_MCMC_predictiveDensity.m index 9efb6bc5..47a96bc2 100644 --- a/get_MCMC_predictiveDensity.m +++ b/VBA_MCMC_predictiveDensity.m @@ -1,8 +1,8 @@ -function [pX,gX,pY,gY,X,Y] = get_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np,lx,ly) +function [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np,lx,ly) % prior predictive density under sDCM generative model (MCMC) % function [pX,gX,pY,gY,X,Y] = -% get_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np) +% VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np) % IN: % - f_fname: name/handle of the evolution function % - g_fname: name/handle of the observation function @@ -59,8 +59,13 @@ out = []; for i=1:N [x0,theta,phi] = sampleFromPriors(options,dim); - [y,x] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); - if ~isweird(y) && ~isweird(x) && isInRange(x,lx) && isInRange(y,ly) + try + [y,x] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); + ok = ~ VBA_isWeird ({x, y}) && VBA_isInRange (x, lx) && VBA_isInRange (y, ly); + catch + ok = false; + end + if ok Y(:,:,i) = y; X(:,:,i) = x; fprintf(1,repmat('\b',1,8)) @@ -133,7 +138,7 @@ if dim.n 
> 0 if ~isequal(priors.SigmaX0,zeros(size(priors.SigmaX0))) - sV = VBA_getISqrtMat(priors.SigmaX0,0); + sV = VBA_sqrtm (priors.SigmaX0); x0 = priors.muX0 + sV*randn(dim.n,1); else x0 = priors.muX0; @@ -144,7 +149,7 @@ if dim.n_theta > 0 if ~isequal(priors.SigmaTheta,zeros(size(priors.SigmaTheta))) - sV = VBA_getISqrtMat(priors.SigmaTheta,0); + sV = VBA_sqrtm(priors.SigmaTheta); theta = priors.muTheta + sV*randn(dim.n_theta,1); else theta = priors.muTheta; @@ -155,7 +160,7 @@ if dim.n_phi > 0 if ~isequal(priors.SigmaPhi,zeros(size(priors.SigmaPhi))) - sV = VBA_getISqrtMat(priors.SigmaPhi,0); + sV = VBA_sqrtm (priors.SigmaPhi); phi = priors.muPhi + sV*randn(dim.n_phi,1); else phi = priors.muPhi; @@ -165,16 +170,3 @@ end - -function flag = isInRange(x,lx) -if isempty(x) || isempty(lx) - flag = 1; - return -end -nx = size(x,1); -flag = 1; -for i=1:nx - flag = flag & ~any(x(i,:)lx(i,2)); -end - - diff --git a/get_MCMC_predictiveDensity_fb.m b/VBA_MCMC_predictiveDensity_fb.m similarity index 87% rename from get_MCMC_predictiveDensity_fb.m rename to VBA_MCMC_predictiveDensity_fb.m index fcba980f..6ebce5a3 100644 --- a/get_MCMC_predictiveDensity_fb.m +++ b/VBA_MCMC_predictiveDensity_fb.m @@ -1,8 +1,8 @@ -function [pX,gX,pY,gY,X,Y,U] = get_MCMC_predictiveDensity_fb(f_fname,g_fname,u,n_t,options,dim,fb,N,np,lx,ly) +function [pX,gX,pY,gY,X,Y,U] = VBA_MCMC_predictiveDensity_fb(f_fname,g_fname,u,n_t,options,dim,fb,N,np,lx,ly) % prior predictive density under sDCM generative model (MCMC) % function [pX,gX,pY,gY,X,Y] = -% get_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np) +% VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np) % IN: % - f_fname: name/handle of the evolution function % - g_fname: name/handle of the observation function @@ -53,9 +53,9 @@ alpha = []; end -if ~options.binomial +try % if there are gaussian sources sigma = options.priors.a_sigma./options.priors.b_sigma; -else +catch sigma = []; end @@ -73,8 +73,8 @@ out = []; for i=1:N [x0,theta,phi] = sampleFromPriors(options,dim); - [y,x,x0,eta,e,u] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0,fb); - if ~isweird(y) && ~isweird(x) && isInRange(x,lx) && isInRange(y,ly) + [y,x,x0,eta,e,u] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0,fb); + if ~ VBA_isWeird ({x, y}) && VBA_isInRange (x, lx) && VBA_isInRange (y, ly) Y(:,:,i) = y; X(:,:,i) = x; U(:,:,i) = u; @@ -151,7 +151,7 @@ if dim.n > 0 if ~isequal(priors.SigmaX0,zeros(size(priors.SigmaX0))) - sV = VBA_getISqrtMat(priors.SigmaX0,0); + sV = VBA_sqrtm (priors.SigmaX0); x0 = priors.muX0 + sV*randn(dim.n,1); else x0 = priors.muX0; @@ -162,7 +162,7 @@ if dim.n_theta > 0 if ~isequal(priors.SigmaTheta,zeros(size(priors.SigmaTheta))) - sV = VBA_getISqrtMat(priors.SigmaTheta,0); + sV = VBA_sqrtm (priors.SigmaTheta); theta = priors.muTheta + sV*randn(dim.n_theta,1); else theta = priors.muTheta; @@ -173,7 +173,7 @@ if dim.n_phi > 0 if ~isequal(priors.SigmaPhi,zeros(size(priors.SigmaPhi))) - sV = VBA_getISqrtMat(priors.SigmaPhi,0); + sV = VBA_sqrtm (priors.SigmaPhi); phi = priors.muPhi + sV*randn(dim.n_phi,1); else phi = priors.muPhi; @@ -182,17 +182,3 @@ phi = []; end - - -function flag = isInRange(x,lx) -if isempty(lx) - flag = 1; - return -end -nx = size(x,1); -flag = 1; -for i=1:nx - flag = flag & ~any(x(i,:)lx(i,2)); -end - - diff --git a/VBA_MFX.m b/VBA_MFX.m index 16e042e8..6b3189ee 100644 --- a/VBA_MFX.m +++ b/VBA_MFX.m @@ -375,10 +375,10 @@ fprintf([' done.','\n']) o_group.date = clock; o_group.dt = 
toc(o_group.tStart); -o_group.options.binomial = o_sub{1}.options.binomial; +o_group.options.sources = o_sub{1}.options.sources; for i=1:ns o_group.within_fit.F(i) = o_sub{i}.F(end); - o_group.within_fit.R2(i) = o_sub{i}.fit.R2; + o_group.within_fit.R2(i,:) = o_sub{i}.fit.R2; o_group.within_fit.LLH0(i) = VBA_LMEH0(o_sub{i}.y,o_sub{i}.options); o_sub{i}.options.kernelSize = kernelSize0; [tmp,o_sub{i}] = VBA_getDiagnostics(p_sub{i},o_sub{i}); @@ -427,7 +427,11 @@ V(indrfx,indrfx) = VBA_inv(iV0(indrfx,indrfx)+ns*iQ); m(indrfx) = V(indrfx,indrfx)*(iV0(indrfx,indrfx)*m0(indrfx)+iQ*sm); a(indrfx) = a0(indrfx) + 0.5*ns; -b(indrfx) = b0(indrfx) + 0.5*(sv(indrfx)+ns*diag(V(indrfx,indrfx))); +% b(indrfx) = b0(indrfx) + 0.5*(sv(indrfx)+ns*diag(V(indrfx,indrfx))); +% fix: do not index because 'sv' is by definition updated only for the rfx +% parameters +b(indrfx) = b0(indrfx) + 0.5*(sv+ns*diag(V(indrfx,indrfx))); % do not index sv b + % FFX if ~isempty(indffx) tmp = VBA_inv(sP); diff --git a/VBA_NLStateSpaceModel.m b/VBA_NLStateSpaceModel.m index 8a6ea8e1..5a79b588 100644 --- a/VBA_NLStateSpaceModel.m +++ b/VBA_NLStateSpaceModel.m @@ -1,8 +1,7 @@ - function [posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options,in) - -% VB inversion of nonlinear stochastic DCMs -% function [posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options,in) +% Generic model inversion routine +% [posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options,in) +% % This function inverts any nonlinear state-space model of the form: % y_t = g( x_t,u_t,phi ) + e_t % x_t = f( x_t-1,u_t,theta ) + f_t @@ -107,11 +106,24 @@ % NB: nonzero delays induces an embedding of the system, thereby % increasing the computational demand of the inversion. The state % space dimension if multiplied by max(options.delays)+1! -% .binomial: {0}. If 1, it is assumed that the likelihood function is -% a binomial pdf, whose probability is given by the observation -% function, i.e. g(phi) = p(y=1|phi). The Laplace approximation on -% observation parameters still holds and the i/o of the inversion -% routine is conserved. +% .sources: a structure or array of structures defining the probability +% distribution of the observations. If the observations are +% homogeneous, this structure can contain a unique field 'type' whose +% value can be: +% - {0} for normally distributed observations, ie. y = g(x,phi) + eps +% where eps ~ N(0,sigma^2 Id), with sigma^2 itself defined by +% hyperparameters a_sigma and b_sigma. +% - 1 for binary observations. It is then assumed that the +% likelihood function is a binomial pdf, whose probability is +% given by the observation function, i.e. g(phi) = p(y=1|phi). +% The Laplace approximation on observation parameters still holds +% and the i/o of the inversion routine is conserved. +% - 2 for categorical data. +% If the observations are composed of a concatenation of differently +% distributed variables, sources should be an array of structures, +% each defining the distribution type as above, and also containing +% a field 'out' that gives the indices of the observations covered by the +% distribution. % .figName: the name of the display window % - in: structure variable containing the output of a previously ran % VBA_NLStateSpaceModel.m routine.
Providing this input to the function @@ -225,7 +237,7 @@ % Check input arguments consistency (and fill in priors if necessary) [options,u,dim] = VBA_check(y,u,f_fname,g_fname,dim,options); - if isweird(y(~options.isYout)) + if VBA_isWeird (y(~ options.isYout)) disp('Error: VBA detected a numerical issue with provided data!') return end @@ -331,7 +343,7 @@ %--------------- Termination condition ---------------% dF = diff(suffStat.F); dF = dF(end); - if ( ( (abs(dF)<=options.TolFun)||it==options.MaxIter ) && it >=options.MinIter ) || isweird(dF) + if ( ( (abs(dF)<=options.TolFun)||it==options.MaxIter ) && it >=options.MinIter ) || VBA_isWeird (dF) stop = 1; if abs(dF) <= options.TolFun out.CV = 1; diff --git a/VBA_ReDisplay.m b/VBA_ReDisplay.m index 1d9dd8fa..a3e74d09 100644 --- a/VBA_ReDisplay.m +++ b/VBA_ReDisplay.m @@ -196,7 +196,7 @@ function myDeterministic(hfig) % Display precision hyperparameters VBA_updateDisplay(posterior,suffStat,options,y,0,'precisions') - if ~options.OnLine && ~options.binomial + if ~options.OnLine && any([options.sources.type]==0) xlabel(options.display.ha(6),' ') if numel(options.display.ha)>7 && ishghandle(options.display.ha(8)) xlabel(options.display.ha(8),' ') @@ -214,7 +214,7 @@ function myDeterministic(hfig) VBA_updateDisplay(posterior,suffStat,options,y,0,'phi') end - try getSubplots ; end + try, VBA_getSubplots (); end uicontrol( ... 'parent' , hfig , ... @@ -272,7 +272,7 @@ function myPriors(hfig) % Display precision hyperparameters VBA_updateDisplay(posterior,suffStat,options,y,0,'precisions') - if ~options.OnLine && ~options.binomial + if ~options.OnLine && any([options.sources.type]==0) xlabel(options.display.ha(6),' ') ; if numel(options.display.ha)>7 && ishghandle(options.display.ha(8)) xlabel(options.display.ha(8),' ') ; @@ -290,7 +290,7 @@ function myPriors(hfig) VBA_updateDisplay(posterior,suffStat,options,y,0,'phi') end - try getSubplots ; end + try, VBA_getSubplots (); end function myConv(hfig) @@ -338,7 +338,7 @@ function myConv(hfig) ylabel(ha,'Free energy differences') box(ha,'off') try - getSubplots ; + VBA_getSubplots (); end end @@ -422,7 +422,7 @@ function myKerneli(hObject,evt) ud.handles = handles; set(hfig,'userdata',ud); - try, getSubplots; end + try, VBA_getSubplots (); end function myDiagnostics(hfig) @@ -508,7 +508,7 @@ function myDiagnostics(hfig) colormap(display.ha(6),col); try display.hc(2) = colorbar('peer',display.ha(6)); end - try getSubplots; end + try VBA_getSubplots (); end function myDiagnosticsi(hObject,evt,si) @@ -560,7 +560,7 @@ function myDiagnosticsi(hObject,evt,si) xlabel(handles.hdiagnostics(2),ti,'fontsize',8) ylabel(handles.hdiagnostics(2),'e(t) = y(t)-g(x(t))','fontsize',8) % display autocorrelation of residuals - if ~isweird(dy.R) && out.dim.n_t > 1 + if ~ VBA_isWeird (dy.R) && out.dim.n_t > 1 handles.hdiagnostics(3) = subplot(4,2,7,'parent',hPanel); plot(handles.hdiagnostics(3),[-out.options.dim.n_t:out.options.dim.n_t-1],fftshift(dy.R)') axis(handles.hdiagnostics(3),'tight') @@ -571,7 +571,7 @@ function myDiagnosticsi(hObject,evt,si) end ud.handles = handles; set(hfig,'userdata',ud); - try, getSubplots; end + try, VBA_getSubplots (); end function myVB(hfig) @@ -621,7 +621,7 @@ function myVB(hfig) if dim.n_phi >= 1 VBA_updateDisplay(posterior,suffStat,options,y,0,'phi') end - try getSubplots; end + try VBA_getSubplots (); end %% helpers diff --git a/VBA_designEfficiency.m b/VBA_designEfficiency.m index fab0b569..074b78f6 100644 --- a/VBA_designEfficiency.m +++ b/VBA_designEfficiency.m @@ -31,9 +31,7 @@ 
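To make the `options.sources` documentation added to VBA_NLStateSpaceModel above more concrete, here is a hedged sketch of how a mixed-observation design could be declared; the row indices and the four-row data layout are purely illustrative.

```matlab
% Hypothetical example: y has 4 rows; rows 1-3 are gaussian, row 4 is binary
options = struct ();
options.sources(1) = struct ('type', 0, 'out', 1:3);  % gaussian source
options.sources(2) = struct ('type', 1, 'out', 4);    % binary (binomial) source
% options is then passed, together with y, u, f_fname, g_fname and dim, to:
% [posterior, out] = VBA_NLStateSpaceModel (y, u, f_fname, g_fname, dim, options);
```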
switch flag case 'models' - - try,binomial=options{1}.binomial;catch,binomial=0;end - + nm = length(dim); if ~iscell(f_fname) f_fname0 = f_fname; @@ -65,11 +63,9 @@ end % computes Jensen-Shannon divergence of models - ps = ones(nm,1); - ps = ps./sum(ps); fprintf(1,'\n') fprintf(1,['Deriving design efficiency... ']) - [DJS,b] = VBA_JensenShannon(muy,Vy,ps,binomial); + [DJS,b] = VBA_JensenShannon(muy,Vy,options{1}.sources); fprintf(1,[' OK.']) fprintf(1,'\n') e = DJS; @@ -78,9 +74,7 @@ out.Vy = Vy; case 'parameters' - - try,binomial=options.binomial;catch,binomial=0;end - + % get prediced model parameter precision matrix [~,~,iVp] = VBA_getLaplace(u,f_fname,g_fname,dim,options,0,'skip'); % compute the trace of predicted posterior variance diff --git a/VBA_disp.m b/VBA_disp.m deleted file mode 100644 index 7938565a..00000000 --- a/VBA_disp.m +++ /dev/null @@ -1,19 +0,0 @@ -function VBA_disp(str,options) -% display string or cell-arry of strings -try - verbose = options.verbose; -catch - verbose = 1; -end -% conditional display function -if verbose - if iscell(str) - n = length(str); - for i=1:n - fprintf(1,str{i}) - end - else - fprintf(1,str) - end - fprintf('\n') -end \ No newline at end of file diff --git a/VBA_getISqrtMat.m b/VBA_getISqrtMat.m deleted file mode 100644 index f9b54c08..00000000 --- a/VBA_getISqrtMat.m +++ /dev/null @@ -1,34 +0,0 @@ -function S = VBA_getISqrtMat(C,inv) -% quick derivation of the (inverse) square root of a matrix -% function S = VBA_getISqrtMat(C,inv) -% IN: -% - C: the entry matrix -% - inv: binary flag for inverse square root (inv=1) or square root -% (inv=0) matrix operator -% OUT: -% - S: the (inverse) square root of C. - -if nargin < 2 - inv = 1; -else - inv = ~~inv; -end -C(C==Inf) = 1e8; % dirty fix for infinite precision matrices -if sum(C(:)) ~= 0 - if isequal(C,diag(diag(C))) - if inv - S = diag(sqrt(diag(C).^-1)); - else - S = diag(sqrt(diag(C))); - end - else - [U,s,V] = svd(full(C)); - if inv - S = U*diag(sqrt(diag(s).^-1))*V'; - else - S = U*diag(sqrt(diag(s)))*V'; - end - end -else - S = 0; -end diff --git a/VBA_getKernels.m b/VBA_getKernels.m deleted file mode 100644 index 457f0558..00000000 --- a/VBA_getKernels.m +++ /dev/null @@ -1,82 +0,0 @@ -function [H1,K1,tgrid] = VBA_getKernels(posterior,out,dcm) -% Dummy derivation of the system's response kernels -% function [H1,K1,tgrid] = VBA_getKernels(posterior,out,dcm) -% IN: -% - posterior,out: the output of the system inversion -% - dcm: flag for dcm (does not compute hemodynamic states kernels) -% OUT: -% - H1: the (pxtxnu) output impulse response function, where p is the -% dimension of the data, t is the number of time samples and nu is the -% number of inputs to the system -% - K1: the (nxtxnu) state impulse response function, where n is the -% number of states. NB: for DCM models, this is the neural impulse -% response function... -% - tgrid: the time grid over which the kernels are estimated - -if isequal(out.dim.n_t,1) || out.dim.u < 1 || isempty(out.options.f_fname) - % not a dynamical system - H1 = []; - K1 = []; - tgrid = []; - return -end - -if nargin <3 - dcm = 0; -else - dcm = ~~dcm; -end - -nu = out.dim.u; - -n = out.dim.n; -p = out.dim.p; -out.options.microU = 1; % for impulse response functions... -if dcm - % remove confounds,... - out.options.inG.confounds.X0 = []; - % only look at neuronal states,... 
- n = p; %size(out.options.inF.C,1); - % and get kernels over 16 secs - TR = out.options.inF.deltat*out.options.decim; - out.options.dim.n_t = ceil(16./TR); -end -nt = out.options.dim.n_t*out.options.decim; - -% ensure steady state initial conditions and throw away state noise -% estimate -posterior.muX0 = zeros(size(posterior.muX0)); -out.suffStat.dx = []; - -% pre-allocate response kernels -H1 = zeros(p,nt,nu); -K1 = zeros(n,nt,nu); - -% derive kernels by integrating the system -gotit = 0; -for i=1:nu - try - U = zeros(nu,nt); - U(i,1) = 1; - % get output impulse response - [x,gx,tgrid] = VBA_microTime(posterior,U,out); - H1(:,:,i) = gx(:,2:end); - if dcm && isfield(out.options.inF,'n5') - K1(:,:,i) = x(out.options.inF.n5,2:end); - else - K1(:,:,i) = x(:,2:end); - end - gotit = 1; - end -end -% clean up kernels -if ~gotit - H1 = []; - K1 = []; - tgrid = []; -else - tgrid = tgrid(2:end); - K1(abs(K1)<=1e-8) = 0; - H1(abs(H1)<=1e-8) = 0; -end - diff --git a/VBA_getLaplace.m b/VBA_getLaplace.m index 14179628..9f596aff 100644 --- a/VBA_getLaplace.m +++ b/VBA_getLaplace.m @@ -168,9 +168,9 @@ X0 = options.priors.muX0; % get prior covariance structure - dgdtheta = numericDiff(@getObs,1,theta,phi,X0,u,in); - dgdphi = numericDiff(@getObs,2,theta,phi,X0,u,in); - dgdX0 = numericDiff(@getObs,3,theta,phi,X0,u,in); + dgdtheta = VBA_numericDiff(@getObs,1,theta,phi,X0,u,in); + dgdphi = VBA_numericDiff(@getObs,2,theta,phi,X0,u,in); + dgdX0 = VBA_numericDiff(@getObs,3,theta,phi,X0,u,in); Vy2 = dgdtheta'*options.priors.SigmaTheta*dgdtheta ... + dgdphi' *options.priors.SigmaPhi *dgdphi ... diff --git a/VBA_get_dL.m b/VBA_get_dL.m deleted file mode 100644 index b10e0615..00000000 --- a/VBA_get_dL.m +++ /dev/null @@ -1,40 +0,0 @@ -function [ddydphi,d2gdx2,logL,dy,dy2,vy]=VBA_get_dL(gx,dG_dPhi,y,type,Qy,sigmaHat) - -if nargin<5 -end - -% prediction error -dy = y - gx; - -switch type - - case 0 %--- normal - vy=(1./sigmaHat).*diag(VBA_inv(Qy,[])); - ddydphi = sigmaHat.*(dG_dPhi*Qy*dy); - d2gdx2 = sigmaHat.*(dG_dPhi*Qy*dG_dPhi'); - dy2=dy'*Qy*dy ; - logL = -0.5*sigmaHat.*dy2 ; - logL = logL + 0.5*VBA_logDet(Qy*sigmaHat) - 0.5*log(2*pi) ; - - case 1 %--- binomial - gx = checkGX_binomial(gx); - vy = gx.*(1-gx) ; - ddydphi = dG_dPhi*(dy./vy); - temp = y./(gx).^2 - (y-1)./(1-gx).^2; - d2gdx2 = dG_dPhi*diag(temp)*dG_dPhi'; - logL = y'*log(gx) + (1-y)'*log(1-gx); - dy2 = sum(temp); - - case 2 %--- multinomial - gx = checkGX_binomial(gx); - vy = gx.*(1-gx) ; - ddydphi = dG_dPhi*(y./gx); - d2gdx2 = dG_dPhi*diag(y./gx.^2)*dG_dPhi'; - dy2 = sum(y./(gx).^2); - logL = log(gx)'*y; - -end - - - -end \ No newline at end of file diff --git a/VBA_groupBMC.m b/VBA_groupBMC.m index 1f5d5562..5c7708ca 100644 --- a/VBA_groupBMC.m +++ b/VBA_groupBMC.m @@ -103,10 +103,10 @@ out = []; return end - tmp = [tmp;vec(indf)]; + tmp = [tmp; VBA_vec(indf)]; options.C(indf,i) = 1; end - if ~isequal(vec(unique(tmp)),vec(1:K)) + if ~isequal(VBA_vec(unique(tmp)),VBA_vec(1:K)) if numel(unique(tmp)) < K disp('Error: families do not cover the entire set of models!') else @@ -216,7 +216,7 @@ out.L = L; out.F = F; % derive first and second order moments on model frequencies: -[out.Ef,out.Vf] = Dirichlet_moments(posterior.a); +[out.Ef,out.Vf] = VBA_dirichlet_moments(posterior.a); % derive exceedance probabilities % out.ep = VBA_ExceedanceProb(out.Ef,out.Vf,'gaussian'); out.ep = VBA_ExceedanceProb(posterior.a,[],'dirichlet',0); @@ -231,12 +231,13 @@ [out.F0] = FE_null(L,options); out.bor = 1/(1+exp(F-out.F0)); [out.Fffx] = FE_ffx(L,options); + out.pxp = 
out.ep * (1 - out.bor) + out.bor / numel(out.ep); end % pool evidence over families if ~isempty(options.families) out.families.r = options.C'*posterior.r; out.families.a = options.C'*posterior.a; - [out.families.Ef,out.families.Vf] = Dirichlet_moments(out.families.a); + [out.families.Ef,out.families.Vf] = VBA_dirichlet_moments(out.families.a); % out.families.ep = VBA_ExceedanceProb(out.families.Ef,out.families.Vf,'gaussian'); out.families.ep = VBA_ExceedanceProb(out.families.a,[],'dirichlet',0); end @@ -314,14 +315,4 @@ Fffx_fam = zf'*ss - sum(zf.*log(zf+eps)); end -function [E,V] = Dirichlet_moments(a) -% derives the first- and second-order moments of a Dirichlet density -a0 = sum(a); -E = a./a0; -V = -a*a'; -V = V - diag(diag(V)) + diag(a.*(a0-a)); -V = V./((a0+1)*a0^2); - - - diff --git a/VBA_groupBMC_btwConds.m b/VBA_groupBMC_btwConds.m index c3a540b0..9d0bd4a8 100644 --- a/VBA_groupBMC_btwConds.m +++ b/VBA_groupBMC_btwConds.m @@ -67,9 +67,9 @@ out = []; return end - tmp = [tmp;vec(indf)]; + tmp = [tmp; VBA_vec(indf)]; end - if ~isequal(vec(unique(tmp)),vec(1:nm)) + if ~isequal(VBA_vec(unique(tmp)),VBA_vec(1:nm)) if numel(unique(tmp)) < nm disp('Error: families do not cover the entire set of models!') else @@ -80,11 +80,11 @@ return end else - Cfam = vec(1:nm); + Cfam = VBA_vec(1:nm); end -try;factors;catch;factors=vec(1:nc);end +try;factors;catch;factors=VBA_vec(1:nc);end sf = size(factors); sf(sf<=1) = []; nf = size(sf,2); % number of factors/dimensions across conditions @@ -97,7 +97,7 @@ strpar(end) = []; strpar(2*(f-1)+1) = 'k'; for k=1:nlevels - eval(['indf{f}(:,k) = vec(factors(',strpar,'));']) + eval(['indf{f}(:,k) = VBA_vec(factors(',strpar,'));']) end end % form nc-tuples diff --git a/VBA_hyperparameters.m b/VBA_hyperparameters.m index e69b839f..e893b2d4 100644 --- a/VBA_hyperparameters.m +++ b/VBA_hyperparameters.m @@ -39,9 +39,16 @@ dim.p = size(y,1); end +if isfield(options,'sources') && (numel(options.sources)>1 || options.sources.type >0) + error('*** VBA_hyperparameters is only defined for unique gaussian sources\n'); +end + + % specify minimal default options options.tStart = tic; -options = VBA_check_struct(options,'binomial',0,'DisplayWin',1,'verbose',1,'kernelSize',16); +options = VBA_check_struct(options,'sources',struct(),'DisplayWin',1,'verbose',1,'kernelSize',16); +options.sources=VBA_check_struct(options.sources,'type',0,'out',dim.p); + kernelSize0 = options.kernelSize; options.kernelSize = 0; @@ -167,7 +174,7 @@ ylabel(ha(4),'') end drawnow - getSubplots + VBA_getSubplots (); end %--- VB: iterate until convergence... 
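The protected exceedance probability added to VBA_groupBMC above combines the exceedance probabilities with the Bayesian omnibus risk, pxp = ep*(1 - bor) + bor/K. A toy numerical check, with made-up values:

```matlab
% Toy check of the protected exceedance probability formula (hypothetical values)
ep  = [0.70; 0.20; 0.10];                 % exceedance probabilities over K = 3 models
bor = 0.25;                               % Bayesian omnibus risk
pxp = ep * (1 - bor) + bor / numel (ep)   % protected exceedance probabilities
```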
---% @@ -227,10 +234,10 @@ if nphi >0 EP = posterior.a_phi/posterior.b_phi; VP = EP/posterior.b_phi; - set(ha(2),'xlim',[-.2,it+0.8],'xtick',[]) logCI = log(EP+sqrt(VP)) - log(EP); plotUncertainTimeSeries(log(EP),logCI.^2,it,ha(2)); set(ha(2),'ygrid','on','xgrid','off') + set(ha(2),'xlim',[-.2,it+0.8],'xtick',[]) end if ntheta >0 EP = posterior.a_theta/posterior.b_theta; @@ -310,7 +317,7 @@ out.options.kernelSize = kernelSize0; [tmp,out] = VBA_getDiagnostics(posterior,out); VBA_ReDisplay(posterior,out) - getSubplots + VBA_getSubplots (); end % subfunctions diff --git a/VBA_optimPriors.m b/VBA_optimPriors.m index ddb8f4e5..02ce8a32 100644 --- a/VBA_optimPriors.m +++ b/VBA_optimPriors.m @@ -178,12 +178,12 @@ opt.priors = density{m_gen}; % priors set to the chosen density for simulation try - [pX,gX,pY,gY,X,Y,U] = get_MCMC_predictiveDensity_f(M{m_gen}.f_fname,M{m_gen}.g_fname,u,n_t,opt,M{m_gen}.options.dim,Nsim,M{m_gen}.fb); + [pX,gX,pY,gY,X,Y,U] = VBA_MCMC_predictiveDensity_fb(M{m_gen}.f_fname,M{m_gen}.g_fname,u,n_t,opt,M{m_gen}.options.dim,Nsim,M{m_gen}.fb); simulations{m_gen}.X = X; simulations{m_gen}.Y = Y; simulations{m_gen}.U = U; catch - [pX,gX,pY,gY,X,Y] = get_MCMC_predictiveDensity(M{m_gen}.f_fname,M{m_gen}.g_fname,u,n_t,opt,M{m_gen}.options.dim,Nsim); + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity(M{m_gen}.f_fname,M{m_gen}.g_fname,u,n_t,opt,M{m_gen}.options.dim,Nsim); simulations{m_gen}.X = X; simulations{m_gen}.Y = Y; end diff --git a/VBA_sample.m b/VBA_sample.m deleted file mode 100644 index 791fc61c..00000000 --- a/VBA_sample.m +++ /dev/null @@ -1,92 +0,0 @@ -function y = VBA_sample(form,suffStat,N,verbose) -% samples from exponential family probability distribution functions -% function y = VBA_sample(form,ss,N,verbose) -% IN: -% - form: 'gaussian', 'gamma', 'dirichlet', 'multinomial' or '1D' -% - suffStat: a structure with appropriate sufficient statistics, i.e.: -% -> if form='gaussian', suffStat.mu = E[y] and suffStat.Sigma = V[y] -% -> if form='gamma', suffStat.a = shape parameter, and suffStat.b = -% scale parameter of the Gamma density -% -> if form='dirichlet', suffStat.d = Dirichlet counts -% -> if form='multinomial', suffStat.p = multinomial probabilities -% and suffstat.n = number of independent trials -% - N: number of samples -% - verbose: verbose mode -% OUT: -% - y: KXN array of vector-valued samples (where K is the dimension of -% the sampled data). -% NOTE: by default, this function tries to use Matlab pseudo-random -% samplers. It reverts to SPM in case these functions cannot be called. - -if verbose - fprintf(1,['Sampling from ',form,' distribution... 
']); - fprintf(1,'%6.2f %%',0) -end -switch form - - case 'gaussian' - S = VBA_getISqrtMat(suffStat.Sigma,0); - n = size(suffStat.mu,1); - y = repmat(suffStat.mu,1,N) + S*randn(n,N); - - case 'gamma' - try - y=gamrnd(suffStat.a,suffStat.b,1,N); - catch - y = zeros(1,N); - for i=1:N - y(i) = VBA_spm_gamrnd(suffStat.a,suffStat.b); - if mod(i,N./20) < 1 && verbose - fprintf(1,repmat('\b',1,8)) - fprintf(1,'%6.2f %%',100*i/N) - end - end - end - - case 'dirichlet' - K = size(suffStat.d,1); - try - r = gamrnd(repmat(vec(suffStat.d),1,N),1,K,N); - y = r ./ repmat(sum(r,1),K,1); - catch - y = zeros(K,N); - r = zeros(K,1); - for i=1:N - for k = 1:K - r(k) = VBA_spm_gamrnd(suffStat.d(k),1); - end - y(:,i) = r./sum(r); - if mod(i,N./20) < 1 && verbose - fprintf(1,repmat('\b',1,8)) - fprintf(1,'%6.2f %%',100*i/N) - end - end - end - - case 'multinomial' - try - y = mnrnd(suffStat.n,suffStat.p,N)'; - catch - K = size(suffStat.p,1); - y = zeros(K,N); - for i=1:suffStat.n - y = y + sampleFromArbitraryP(suffStat.p,eye(K),N)'; - if verbose - fprintf(1,repmat('\b',1,8)) - fprintf(1,'%6.2f %%',100*i/suffStat.n) - end - end - end - -end -if verbose - fprintf(1,repmat('\b',1,8)) - fprintf(' OK.') - fprintf('\n') -end - -function r = drchrnd(a,n) -p = length(a); -r = gamrnd(repmat(a,n,1),1,n,p); -r = r ./ repmat(sum(r,2),1,p); - diff --git a/VBA_setup.m b/VBA_setup.m index da8a709d..887f7a5c 100644 --- a/VBA_setup.m +++ b/VBA_setup.m @@ -125,7 +125,6 @@ function VBA_setup() fprintf('\n') try - cd .. ver = VBA_version(); fprintf(' The VBA-toolbox has been successfully installed in %s\n\n',ver.path); fprintf(' Type demo_Qlearning to give it a try\n\n'); diff --git a/simulateNLSS.m b/VBA_simulate.m similarity index 63% rename from simulateNLSS.m rename to VBA_simulate.m index a1efd8b1..6191f7b3 100644 --- a/simulateNLSS.m +++ b/VBA_simulate.m @@ -1,7 +1,7 @@ -function [y,x,x0,eta,e,u] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0,fb) +function [y,x,x0,eta,e,u] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0,fb) % samples times series from sDCM generative model % [y,x,x0,dTime,eta,eta0] = -% simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0) +% VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0) % % This function creates the time series of hidden-states and measurements % under the following nonlinear state-space model: @@ -77,24 +77,24 @@ 'n_t' , n_t ... ); -try, options.inG; catch, options.inG = []; end +try, options.inG; catch, options.inG = struct (); end try, U = u(:,1); catch, U = zeros(size(u,1),1); end -dim.p = size(feval(g_fname,zeros(dim.n,1),phi,U,options.inG),1); +dim.p = size(g_fname(zeros(dim.n,1),phi,U,options.inG),1); -options.dim=dim; +options.dim = dim; % --- check priors -options.priors = VBA_check_struct(options.priors, ... +options.priors = VBA_check_struct (options.priors, ... 'a_alpha', 1, ... 'b_alpha', 1 ... 
) ; -if isinf(options.priors.a_alpha) && isequal(options.priors.b_alpha,0) +if isempty (options.priors.a_alpha) || (isinf (options.priors.a_alpha) && isequal (options.priors.b_alpha, 0)) options.priors.a_alpha = 1; options.priors.b_alpha = 1; end % --- check options -[options,u,dim] = VBA_check(zeros(dim.p,dim.n_t),u,f_fname,g_fname,dim,options); +[options, u, dim] = VBA_check (zeros (dim.p, dim.n_t), u, f_fname, g_fname, dim, options); % === Prepare simulation @@ -106,15 +106,14 @@ et0 = clock; % pre-allocate variables -x = zeros(dim.n,dim.n_t); -eta = zeros(dim.n,dim.n_t); -e = zeros(dim.p,dim.n_t); -y = zeros(dim.p,dim.n_t); +x = zeros (dim.n, dim.n_t); +eta = zeros (dim.n, dim.n_t); +e = zeros (dim.p, dim.n_t); +y = zeros (dim.p, dim.n_t); % muxer -n_sources=numel(options.sources); -sgi = find([options.sources(:).type]==0) ; - +n_sources = numel (options.sources); +sgi = find ([options.sources(:).type] == 0) ; % === Simulate timeseries @@ -122,11 +121,9 @@ if dim.n > 0 try x0; + assert(~ VBA_isWeird(x0)); catch - x0 = options.priors.muX0; - sQ0 = VBA_getISqrtMat(options.priors.SigmaX0,0); - x0 = x0 + sQ0*randn(dim.n,1); - clear sQ0 + x0 = VBA_random ('Gaussian', options.priors.muX0, options.priors.SigmaX0); end else x0 = zeros(dim.n,1); @@ -139,85 +136,74 @@ VBA_disp({ ... 'Simulating SDE...' , ... sprintf('%6.2f %%%%',0) ... - }, options); + }, options, false); %-- Loop over time points for t = 1:dim.n_t % Evaluate evolution function at past hidden state - if dim.n > 0 - x(:,t+1) = VBA_evalFun('f',x(:,t),theta,u(:,t),options,dim,t) ; - if ~isinf(alpha) - Cx = VBA_getISqrtMat(iQx{t}); - eta(:,t) = (1./sqrt(alpha))*Cx*randn(dim.n,1); - x(:,t+1) = x(:,t+1) + eta(:,t); - end + if dim.n > 0 + % stochastic innovation + Cx = VBA_inv (iQx{t}) / alpha ; + eta(:, t) = VBA_random ('Gaussian', zeros (dim.n, 1), Cx) ; + % evolution + x(:, t + 1) = ... + VBA_evalFun ('f', x(:, t), theta, u(:, t), options, dim, t) ... + + eta(:, t); end % Evaluate observation function at current hidden state - gt = VBA_evalFun('g',x(:,t+1),phi,u(:,t),options,dim,t); + gt = VBA_evalFun ('g', x(:, t + 1), phi, u(:, t), options, dim, t); - for i=1:n_sources + for i = 1 : n_sources s_idx = options.sources(i).out; switch options.sources(i).type % gaussian case 0 - sigma_i = sigma(find(sgi==i)) ; - if ~isinf(sigma_i) - C = VBA_getISqrtMat(iQy{t,find(sgi==i)}); - e(s_idx,t) = (1./sqrt(sigma_i))*C*randn(length(s_idx),1); - end - y(s_idx,t) = gt(s_idx) + e(s_idx,t) ; + C = VBA_inv (iQy{t, sgi == i}) / sigma(sgi == i) ; + y(s_idx,t) = VBA_random ('Gaussian', gt(s_idx), C) ; % binomial case 1 - for k=1:length(s_idx) - y(s_idx(k),t) = sampleFromArbitraryP([gt(s_idx(k)),1-gt(s_idx(k))],[1,0]',1); - end + y(s_idx, t) = VBA_random ('Bernoulli', gt(s_idx)); + % multinomial case 2 - resp = zeros(length(s_idx),1) ; - respIdx = sampleFromArbitraryP(gt(s_idx),1:length(s_idx),1) ; - if ~isnan(respIdx) - resp(respIdx) = 1; - y(s_idx,t) = resp; - else - y(s_idx,t) = NaN; - end + y(s_idx, t) = VBA_random ('Multinomial', 1, gt(s_idx)); end end e(:,t) = y(:,t) - gt; - % fill in next input with last output and feedback if feedback && t < dim.n_t % get feedback on system's output - if ~isempty(fb.h_fname) - u(fb.indfb,t+1) = feval(fb.h_fname,y(:,t),t,fb.inH); + if ~ isempty (fb.h_fname) + u(fb.indfb, t + 1) = fb.h_fname (y(:, t), t, fb.inH); end - u(fb.indy,t+1) = y(:,t); + u(fb.indy, t + 1) = y(:, t); end % Display progress if mod(100*t/dim.n_t,10) <1 VBA_disp({ ... - repmat('\b',1,9) , ... + repmat('\b',1,8) , ... 
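The refactored simulation loop above draws all state noise and observations through VBA_random. Assuming the calling conventions shown in that hunk, a few standalone draws might look like this (dimensions and probabilities are illustrative):

```matlab
% Standalone draws mirroring the VBA_random calls used in VBA_simulate
xs = VBA_random ('Gaussian', zeros (3, 1), eye (3));   % one multivariate gaussian sample
yb = VBA_random ('Bernoulli', [0.2; 0.8]);             % independent binary samples
ym = VBA_random ('Multinomial', 1, [0.1; 0.3; 0.6]);   % one categorical draw
```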
sprintf('%6.2f %%%%',floor(100*t/dim.n_t)), ... - }, options); - end - - if isweird({x(:,t)}) %,y(:,t) - break + }, options, false); end end %unstack X0 -x(:,1) = []; +x(:, 1) = []; +% checks +if VBA_isWeird (x) + error('VBA_simulate: evolution function produced weird values!'); +end + % Display progress VBA_disp({ ... - repmat('\b',1,9) ,... + repmat('\b',1,8) ,... [' OK (took ',num2str(etime(clock,et0)),' seconds).'] ... },options); diff --git a/VBA_unit_tests.m b/VBA_unit_tests.m deleted file mode 100644 index f1d52295..00000000 --- a/VBA_unit_tests.m +++ /dev/null @@ -1,42 +0,0 @@ -function logs = VBA_unit_tests(logs) -% This function simply launches sequentially all the demos of the toolbox and -% reports execution time and potential failures - - -%% find demos -vba_info = VBA_version(); -[~,list]=system(['find ' vba_info.path ' -name demo_*']) ; - -demos = {}; -for p = strsplit(list) - if ~isempty(p{1}) - [~,demos{end+1},~] = fileparts(p{1}); - end -end - -% setup for base -setup = 'pause off; warning off; clear all; close all; ' ; - - -%% run demos -logs = {}; - -parfor i = 1:numel(demos) - demo_name = demos{i}; - - try - close all - - fprintf('\n ####################\n %s \n ####################\n',demo_name) - tic ; - evalin('base',[ setup demo_name]) - logs{i} = struct('demo',demo_name,'status',1,'stack',[],'time',toc); - catch err - logs{i} = struct('demo',demo_name,'status',0,'stack',err,'time',toc); - end - -end - -logs = [logs{:}]; - - diff --git a/VBA_version.m b/VBA_version.m index ad36660a..784bbe29 100644 --- a/VBA_version.m +++ b/VBA_version.m @@ -1,4 +1,4 @@ -function infos = VBA_version() +function [infos, status] = VBA_version() % infos = VBA_version() % return infos about the current version of the toolbox. It requires the % toolbox to be installed using git (not by downlading the zip) to work properly: @@ -18,20 +18,23 @@ infos.version = [gitInf.branch '/' gitInf.hash]; infos.git = true; + if nargout == 2 + try % try to see if new commits are online request = sprintf('https://api.github.com/repos/MBB-team/VBA-toolbox/compare/%s...%s',gitInf.branch,gitInf.hash); tracker=webread(request); switch tracker.status case 'identical' - infos.status = 'The toolbox is up to date.'; + status = 'The toolbox is up to date.'; case 'behind' - infos.status = sprintf('The toolbox is %d revision(s) behind the online version.',tracker.behind_by); + status = sprintf('The toolbox is %d revision(s) behind the online version.',tracker.behind_by); case 'ahead' - infos.status = sprintf('The toolbox is %d revision(s) ahead the online version.',tracker.ahead_by); + status = sprintf('The toolbox is %d revision(s) ahead the online version.',tracker.ahead_by); end end + end catch infos.version = 'unkown (not under git control)'; diff --git a/checkGX_binomial.m b/checkGX_binomial.m deleted file mode 100644 index 1d217fa4..00000000 --- a/checkGX_binomial.m +++ /dev/null @@ -1,16 +0,0 @@ -function x = checkGX_binomial(x,lim) - -% finesses 0/1 (inifinite precision) binomial probabilities - -% function x = checkGX_binomial(x) -% IN: -% - x: vector of binomial probabilities -% - lim: probability threshold -% OUT: -% - x: vector of corrected binomial probabilities - -if nargin==1 - lim = 1e-8; -end -x(x<=lim) = lim; -x(x>=1-lim) = 1-lim; \ No newline at end of file diff --git a/VBA_FreeEnergy.m b/core/VBA_FreeEnergy.m similarity index 98% rename from VBA_FreeEnergy.m rename to core/VBA_FreeEnergy.m index 5d2083f3..25751805 100644 --- a/VBA_FreeEnergy.m +++ b/core/VBA_FreeEnergy.m @@ -94,7 +94,7 @@ if 
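With the new two-output signature of VBA_version shown above, a version and update check could be run as follows; the second output is only computed when requested and the online comparison requires internet access.

```matlab
% Query the installed version; ask for the optional online update status
[infos, status] = VBA_version ();
disp (infos.version);   % branch/hash if the toolbox is under git control
disp (status);          % e.g. 'The toolbox is up to date.'
```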
~isempty(indIn) ntot = ntot + length(indIn); Q = priors.SigmaPhi(indIn,indIn); - iQ = VBA_inv(Q,[]); + iQ = VBA_inv(Q); SSE = SSE + suffStat.dphi(indIn)'*iQ*suffStat.dphi(indIn); ldQ = ldQ - VBA_logDet(Q,[]); S = S + suffStat.Sphi - 0.5*length(indIn); @@ -107,7 +107,7 @@ if ~isempty(indIn) ntot = ntot + length(indIn); Q = priors.SigmaTheta(indIn,indIn); - iQ = VBA_inv(Q,[]); + iQ = VBA_inv(Q); SSE = SSE + suffStat.dtheta(indIn)'*iQ*suffStat.dtheta(indIn); ldQ = ldQ - VBA_logDet(Q,[]); S = S + suffStat.Stheta - 0.5*length(indIn); @@ -120,7 +120,7 @@ if ~isempty(indIn) ntot = ntot + length(indIn); Q = priors.SigmaX0(indIn,indIn); - iQ = VBA_inv(Q,[]); + iQ = VBA_inv(Q); SSE = SSE + suffStat.dx0(indIn)'*iQ*suffStat.dx0(indIn); ldQ = ldQ - VBA_logDet(Q,[]); S = S + suffStat.SX0 - 0.5*length(indIn); diff --git a/VBA_GN.m b/core/VBA_GN.m similarity index 75% rename from VBA_GN.m rename to core/VBA_GN.m index d6a6bdac..5cfcf186 100644 --- a/VBA_GN.m +++ b/core/VBA_GN.m @@ -31,48 +31,66 @@ % iterations... switch flag + + % for stochastic evolution, update hidden states with (lagged) message passing algorithm case 'X' if numel(options.sources)>1 || options.sources(1).type==2 - error('*** Stochastic multichannel VB is not yet supported !'); + error('*** Stochastic multisources or multinomial VB is not yet supported !'); end indIn = options.params2update.x; PreviousMu = posterior.muX; - if ~options.binomial + switch options.sources(1).type + case 0 % gaussian fname = @VBA_IX_lagged; - else + case 1 % binomial fname = @VBA_IX_lagged_binomial; end s1 = 'I() ='; s2 = ''; + + % update initial state case 'X0' indIn = options.params2update.x0; PreviousMu = posterior.muX0(indIn); fname = @VBA_IX0; s1 = 'I() ='; s2 = ''; + + % update observation paramters case 'Phi' indIn = options.params2update.phi; PreviousMu = posterior.muPhi(indIn); - if options.UNL % to be rationalized... 
+ + % un-normalized likelihood + if options.UNL + if numel(options.sources)>1 + error('*** un-normalized likelihood is only supported for mono-source VB!'); + end fname = @VBA_Iphi_UNL; - else - if numel(options.sources)>1 || options.sources(1).type==2 - fname = @VBA_Iphi_extended; - else - if options.nmog > 1 - if options.extended - error('*** Splitted multichannel VB is not yet supported !'); - end - fname = @VBA_Iphi_split; - elseif options.binomial - fname = @VBA_Iphi_binomial; - else - fname = @VBA_Iphi; - end + + % MoG split + elseif options.nmog > 1 + if numel(options.sources)>1 || options.sources(1).type == 2 + error('*** MoG Split is only supported for mono-source (gaussian or binomial) VB!'); end + fname = @VBA_Iphi_split; + + % legacy gaussian observation code + elseif numel(options.sources)==1 && options.sources(1).type==0 + fname = @VBA_Iphi; + + % legacy binomial observation code + elseif numel(options.sources)==1 && options.sources(1).type==1 + fname = @VBA_Iphi_binomial; + + % multisource or multinomial observations + else + fname = @VBA_Iphi_extended; end s1 = 'I() ='; s2 = ''; + + % update evolution paramters case 'Theta' indIn = options.params2update.theta; PreviousMu = posterior.muTheta(indIn); @@ -87,7 +105,7 @@ % Get variational energy (I) and propose move (deltaMu) try - [I,Sigma,deltaMu,suffStat2] = feval(fname,PreviousMu,y,posterior,suffStat,dim,u,options); + [I,Sigma,deltaMu,suffStat2] = fname(PreviousMu,y,posterior,suffStat,dim,u,options); PreviousI = I; catch err VBA_disp(['Warning: could not evaluate variational energy on ',flag,'!'],options) @@ -105,6 +123,9 @@ plot(ha,deltaMu') VBA_title(ha,[s2,' ; ',str]) drawnow +else + hf = nan; + ha = nan; end % Regularized Gauss-Newton VB-Laplace update @@ -119,7 +140,7 @@ mu = PreviousMu + deltaMu; try % get next move and energy step - [I,Sigma,NextdeltaMu,suffStat2] = feval(fname,mu,y,posterior,suffStat,dim,u,options); + [I,Sigma,NextdeltaMu,suffStat2] = fname(mu,y,posterior,suffStat,dim,u,options); % get increment in variational/free energy [rdf,deltaI,F] = getCostIncrement(I,PreviousI,mu,Sigma,suffStat2,options,posterior,flag); catch @@ -133,11 +154,13 @@ plot(ha,deltaMu') str = [s1,num2str(I,'%4.3e'),' ,dI/I =',num2str(rdf,'%4.3e'),' ,it #',num2str(it)]; end - VBA_pause(options) % check 'pause' button + VBA_pause(options); % check 'pause' button % accept move or halve step? 
if deltaI<0 % halve step size deltaMu = 0.5*deltaMu; - try, VBA_title(ha,[s2,': halve step ; ',str]);end + if ishandle(ha) + VBA_title(ha,[s2,': halve step ; ',str]); + end else % accept move % 1- propose a new move according to new local quadratic approx deltaMu = NextdeltaMu; @@ -160,16 +183,24 @@ case 'Theta' VBA_updateDisplay(posterior,suffStat,options,y,[],'theta') end - try,VBA_title(ha,[s2,': accept move ; ',str]);end + if ishandle(ha) + VBA_title(ha,[s2,': accept move ; ',str]); + end conv = 1; end % check convergence criterion if abs(rdf)<=options.GnTolFun || it==options.GnMaxIter stop = 1; - try close(hf); end - try close(suffStat.haf); end + if ishandle(hf) + close(hf); + end + if isfield (suffStat, 'haf') && ishandle(suffStat.haf) + close(suffStat.haf); + end + end + if options.DisplayWin || options.GnFigs + drawnow; end - drawnow end if ~conv suffStat.F = [suffStat.F,suffStat.F(end)]; diff --git a/VBA_Hpost.m b/core/VBA_Hpost.m similarity index 100% rename from VBA_Hpost.m rename to core/VBA_Hpost.m diff --git a/VBA_IX0.m b/core/VBA_IX0.m similarity index 95% rename from VBA_IX0.m rename to core/VBA_IX0.m index 35bba1ae..7372cef9 100644 --- a/VBA_IX0.m +++ b/core/VBA_IX0.m @@ -33,11 +33,11 @@ % posterior covariance matrix terms Q = options.priors.SigmaX0(indIn,indIn); -iQ = VBA_inv(Q,[]); +iQ = VBA_inv(Q); iSigmaX0 = iQ + alphaHat.*dF_dX0(indIn,:)*iQx0(indIn,indIn)*dF_dX0(indIn,:)'; % posterior covariance matrix -SigmaX0 = VBA_inv(iSigmaX0,[]); +SigmaX0 = VBA_inv(iSigmaX0); % mode tmp = iQ*dx0(indIn) + alphaHat.*dF_dX0(indIn,:)*iQx0(indIn,indIn)*dx; @@ -45,7 +45,7 @@ % variational energy IX0 = -0.5.*dx0'*iQ*dx0 - 0.5*alphaHat.*dx2; -if isweird(IX0) +if VBA_isWeird (IX0) div = 1; IX0 = -Inf; else diff --git a/VBA_IX_lagged.m b/core/VBA_IX_lagged.m similarity index 94% rename from VBA_IX_lagged.m rename to core/VBA_IX_lagged.m index d8c56803..dc11e047 100644 --- a/VBA_IX_lagged.m +++ b/core/VBA_IX_lagged.m @@ -113,7 +113,7 @@ SigmaX.inter{t-lag+1} = St(1:dim.n,dim.n+1:2*dim.n); % Predictive density (data space) - V = (1./sigmaHat).*VBA_inv(iQy{t-lag+1},[]) + dG_dX{t-lag+1}'*SigmaX.current{t-lag+1}*dG_dX{t-lag+1}; + V = (1./sigmaHat).*VBA_inv(iQy{t-lag+1}) + dG_dX{t-lag+1}'*SigmaX.current{t-lag+1}*dG_dX{t-lag+1}; if dim.n_phi > 0 V = V + dG_dPhi{t-lag+1}'*posterior.SigmaPhi*dG_dPhi{t-lag+1}; end @@ -128,7 +128,7 @@ end % Accelerate divergent update - if isweird({dy2,dx2,dG_dX{t},dF_dX{t-1},SigmaX.current{t}}) + if VBA_isWeird ({dy2, dx2, dG_dX{t}, dF_dX{t-1}, SigmaX.current{t}}) div = 1; break end @@ -146,7 +146,7 @@ muX(:,dim.n_t-(lag-k)) = mt(ik); % Predictive density (data space) - V = (1./sigmaHat).*VBA_inv(iQy{dim.n_t-(lag-k)},[]) + dG_dX{dim.n_t-(lag-k)}'*SigmaX.current{dim.n_t-(lag-k)}*dG_dX{dim.n_t-(lag-k)}; + V = (1./sigmaHat).*VBA_inv(iQy{dim.n_t-(lag-k)}) + dG_dX{dim.n_t-(lag-k)}'*SigmaX.current{dim.n_t-(lag-k)}*dG_dX{dim.n_t-(lag-k)}; if dim.n_phi > 0 V = V + dG_dPhi{dim.n_t-(lag-k)}'*posterior.SigmaPhi*dG_dPhi{dim.n_t-(lag-k)}; end @@ -169,7 +169,7 @@ % variational energy IX = -0.5.*sigmaHat.*dy2 -0.5*alphaHat.*dx2; -if isweird(IX) || div +if VBA_isWeird (IX) || div IX = -Inf; end diff --git a/VBA_IX_lagged_binomial.m b/core/VBA_IX_lagged_binomial.m similarity index 96% rename from VBA_IX_lagged_binomial.m rename to core/VBA_IX_lagged_binomial.m index 15caf043..2f8158e1 100644 --- a/VBA_IX_lagged_binomial.m +++ b/core/VBA_IX_lagged_binomial.m @@ -49,7 +49,7 @@ [gx(:,1),dG_dX{1},dG_dPhi{1}] = VBA_evalFun('g',X(:,1),posterior.muPhi,u(:,1),options,dim,1); % fix 
numerical instabilities -gx(:,1) = checkGX_binomial(gx(:,1)); +gx(:,1) = VBA_finiteBinomial (gx(:,1)); % check infinite precision transition pdf iQ = VBA_inv(iQx{1},indIn{1},'replace'); @@ -97,7 +97,7 @@ [gx(:,t),dG_dX{t},dG_dPhi{t}] = VBA_evalFun('g',X(:,t),posterior.muPhi,u(:,t),options,dim,t); % fix numerical instabilities - gx(:,t) = checkGX_binomial(gx(:,t)); + gx(:,t) = VBA_finiteBinomial (gx(:,t)); % remove irregular trials yin = find(~options.isYout(:,t)); @@ -151,7 +151,7 @@ end % Accelerate divergent update - if isweird({dx2,dG_dX{t},dF_dX{t-1},SigmaX.current{t}}) + if VBA_isWeird ({dx2, dG_dX{t}, dF_dX{t-1}, SigmaX.current{t}}) div = 1; break end @@ -192,7 +192,7 @@ % variational energy IX = logL -0.5*alphaHat.*dx2; -if isweird({{IX},SigmaX.current,SigmaX.inter}) || div +if VBA_isWeird ({IX, SigmaX.current, SigmaX.inter}) || div IX = -Inf; end diff --git a/VBA_Initialize.m b/core/VBA_Initialize.m similarity index 96% rename from VBA_Initialize.m rename to core/VBA_Initialize.m index 83cb9ccd..dfadd673 100644 --- a/VBA_Initialize.m +++ b/core/VBA_Initialize.m @@ -38,7 +38,7 @@ posterior.a_alpha = options.priors.a_alpha; posterior.b_alpha = options.priors.b_alpha; - if (~options.binomial || sum([options.sources(:).type]==0) >1) && ~options.initHP + if any([options.sources.type]==0) && ~options.initHP posterior.a_sigma = options.priors.a_sigma; posterior.b_sigma = options.priors.b_sigma; end diff --git a/VBA_Iphi.m b/core/VBA_Iphi.m similarity index 95% rename from VBA_Iphi.m rename to core/VBA_Iphi.m index 15c50bc8..25212869 100644 --- a/VBA_Iphi.m +++ b/core/VBA_Iphi.m @@ -25,7 +25,7 @@ % Preallocate intermediate variables iQy = options.priors.iQy; Q = options.priors.SigmaPhi(indIn,indIn); -iQ = VBA_inv(Q,[]); +iQ = VBA_inv(Q); muPhi0 = options.priors.muPhi; Phi = muPhi0; Phi(indIn) = phi; @@ -65,7 +65,7 @@ ddydphi = ddydphi + dG_dPhi*iQy{t}*dy(:,t); % Predictive density (data space) - V = dG_dPhi'*posterior.SigmaPhi*dG_dPhi + (1./sigmaHat).*VBA_inv(iQy{t},[]); + V = dG_dPhi'*posterior.SigmaPhi*dG_dPhi + (1./sigmaHat).*VBA_inv(iQy{t}); if dim.n > 0 V = V + dG_dX'*posterior.SigmaX.current{t}*dG_dX; end @@ -81,7 +81,7 @@ end % Accelerate divergent update - if isweird({dy2,dG_dPhi,dG_dX}) + if VBA_isWeird ({dy2, dG_dPhi, dG_dX}) div = 1; break end @@ -98,7 +98,7 @@ % posterior covariance matrix iSigmaPhi = iQ + sigmaHat.*d2gdx2(indIn,indIn); -SigmaPhi = VBA_inv(iSigmaPhi,[]); +SigmaPhi = VBA_inv(iSigmaPhi); % mode tmp = iQ*dphi0(indIn) + sigmaHat.*ddydphi(indIn); @@ -106,7 +106,7 @@ % variational energy Iphi = -0.5.*dphi0(indIn)'*iQ*dphi0(indIn) -0.5*sigmaHat.*dy2; -if isweird({Iphi,SigmaPhi}) || div +if VBA_isWeird ({Iphi, SigmaPhi}) || div Iphi = -Inf; end diff --git a/VBA_Iphi_UNL.m b/core/VBA_Iphi_UNL.m similarity index 87% rename from VBA_Iphi_UNL.m rename to core/VBA_Iphi_UNL.m index 3a3dfd9f..0b5a4187 100644 --- a/VBA_Iphi_UNL.m +++ b/core/VBA_Iphi_UNL.m @@ -16,7 +16,7 @@ % Preallocate intermediate variables Q = options.priors.SigmaPhi(indIn,indIn); -iQ = VBA_inv(Q,[]); +iQ = VBA_inv(Q); muPhi0 = options.priors.muPhi; Phi = muPhi0; Phi(indIn) = phi; @@ -45,7 +45,7 @@ d2LLdP2 = d2LLdP2 + d2LLdP2t; % Accelerate divergent update - if isweird({LL,dLLdP,d2LLdP2,Ey,Vy}) + if VBA_isWeird ({LL, dLLdP, d2LLdP2, Ey, Vy}) div = 1; break end @@ -66,15 +66,15 @@ if options.checkGrads mayPause = 0; if ~isempty(Phi) - dLLdPt2 = numericDiff(@VBA_evalAL,2,[],Phi,beta,u(:,t),y(:,t),options); - if ~isweird(dLLdPt2) + dLLdPt2 = VBA_numericDiff(@VBA_evalAL,2,[],Phi,beta,u(:,t),y(:,t),options); + 
if ~ VBA_isWeird (dLLdPt2) [hf] = VBA_displayGrads(dLLdPt',dLLdPt2,'Gradients wrt parameters',options.g_fname,'g'); mayPause = 1; else VBA_disp('VBA check_grads: Warning: weird numerical gradients!!!') end - d2LLdP2t2 = numericDiff(@numericDiff,4,@VBA_evalAL,2,[],Phi,beta,u(:,t),y(:,t),options); - if ~isweird(dLLdPt2) + d2LLdP2t2 = VBA_numericDiff(@numericDiff,4,@VBA_evalAL,2,[],Phi,beta,u(:,t),y(:,t),options); + if ~ VBA_isWeird (dLLdPt2) [hf] = VBA_displayGrads(d2LLdP2t,d2LLdP2t2,'Hessians wrt parameters',options.g_fname,'g'); mayPause = 1; else @@ -101,7 +101,7 @@ % posterior covariance matrix iSigmaPhi = iQ - d2LLdP2(indIn,indIn); -SigmaPhi = VBA_inv(iSigmaPhi,[]); +SigmaPhi = VBA_inv(iSigmaPhi); % mode tmp = iQ*dphi0(indIn) + dLLdP(indIn)'; @@ -109,7 +109,7 @@ % variational energy Iphi = -0.5.*dphi0(indIn)'*iQ*dphi0(indIn) + LL; -if isweird({Iphi,SigmaPhi}) +if VBA_isWeird ({Iphi, SigmaPhi}) Iphi = -Inf; end diff --git a/VBA_Iphi_binomial.m b/core/VBA_Iphi_binomial.m similarity index 94% rename from VBA_Iphi_binomial.m rename to core/VBA_Iphi_binomial.m index fe1a2c0c..7910c77d 100644 --- a/VBA_Iphi_binomial.m +++ b/core/VBA_Iphi_binomial.m @@ -18,7 +18,7 @@ % Preallocate intermediate variables Q = options.priors.SigmaPhi(indIn,indIn); -iQ = VBA_inv(Q,[]); +iQ = VBA_inv(Q); muPhi0 = options.priors.muPhi; Phi = muPhi0; Phi(indIn) = phi; @@ -43,7 +43,7 @@ [gx(:,t),dG_dX,dG_dPhi] = VBA_evalFun('g',posterior.muX(:,t),Phi,u(:,t),options,dim,t); % fix numerical instabilities - gx(:,t) = checkGX_binomial(gx(:,t)); + gx(:,t) = VBA_finiteBinomial (gx(:,t)); % store states dynamics if ODE mode if isequal(options.g_fname,@VBA_odeLim) @@ -80,7 +80,7 @@ end % Accelerate divergent update - if isweird({dy,dG_dPhi,dG_dX}) + if VBA_isWeird ({dy, dG_dPhi, dG_dX}) div = 1; break end @@ -95,7 +95,7 @@ % posterior covariance matrix iSigmaPhi = iQ + d2gdx2(indIn,indIn); -SigmaPhi = VBA_inv(iSigmaPhi,[]); +SigmaPhi = VBA_inv(iSigmaPhi); % mode tmp = iQ*dphi0(indIn) + ddydphi(indIn); @@ -103,7 +103,7 @@ % variational energy Iphi = -0.5.*dphi0(indIn)'*iQ*dphi0(indIn) + logL; -if isweird({Iphi,SigmaPhi}) || div +if VBA_isWeird ({Iphi, SigmaPhi}) || div Iphi = -Inf; end diff --git a/core/VBA_Iphi_extended.m b/core/VBA_Iphi_extended.m new file mode 100644 index 00000000..7210d257 --- /dev/null +++ b/core/VBA_Iphi_extended.m @@ -0,0 +1,166 @@ +function [Iphi,SigmaPhi,deltaMuPhi,suffStat] = VBA_Iphi_extended(phi,y,posterior,suffStat,dim,u,options) +% Gauss-Newton update of the observation parameters +% !! When the observation function is @VBA_odeLim, this Gauss-Newton update +% actually implements a gradient ascent on the variational energy of the +% equivalent deterministic DCM. +% !! All mean-field perturbation terms are ignored (options.ignoreMF=true) + + +if options.DisplayWin % Display progress + if isequal(options.g_fname,@VBA_odeLim) + STR = 'VB Gauss-Newton on observation/evolution parameters... '; + else + STR = 'VB Gauss-Newton on observation parameters... 
'; + end + set(options.display.hm(1),'string',STR); + set(options.display.hm(2),'string','0%'); + drawnow +end + +% Look-up which evolution parameter to update +indIn = options.params2update.phi; + + +% Preallocate intermediate variables +iQy = options.priors.iQy; +Q = options.priors.SigmaPhi(indIn,indIn); +iQ = VBA_inv(Q); +muPhi0 = options.priors.muPhi; +Phi = muPhi0; +Phi(indIn) = phi; +dphi0 = muPhi0-Phi; +dy = zeros(dim.p,dim.n_t); +vy = zeros(dim.p,dim.n_t); +gx = zeros(dim.p,dim.n_t); +ddydphi = 0; +d2gdx2 = 0; + +dy2 = zeros(1,numel(options.sources)); +logL = zeros(1,numel(options.sources)); + +if isequal(options.g_fname,@VBA_odeLim) + clear VBA_odeLim + muX = zeros(options.inG.old.dim.n,dim.n_t); + SigmaX = cell(dim.n_t,1); +end +div = 0; + +isTout = all (options.isYout, 1); + +%--- Loop over time series ---% +for t=1:dim.n_t + % evaluate observation function at current mode + [gx(:,t),dG_dX,dG_dPhi] = VBA_evalFun('g',posterior.muX(:,t),Phi,u(:,t),options,dim,t); + + % store states dynamics if ODE mode + if isequal(options.g_fname,@VBA_odeLim) + % get sufficient statistics of the hidden states from unused i/o in + % VBA_evalFun. + muX(:,t) = dG_dX.xt; + SigmaX{t} = dG_dX.dx'*posterior.SigmaPhi*dG_dX.dx; + end + + if ~isTout(t) + + % accumulate gradients, hessian and likelyhood + gsi = find([options.sources(:).type]==0) ; + for si = 1:numel(options.sources) + % compute source contribution + idx_obs_all = options.sources(si).out; + is_obs_out = find(options.isYout(idx_obs_all,t)==0); + idx_obs = idx_obs_all(is_obs_out); + + if ~isempty(idx_obs) + + sigmaHat=0; + iQyt=[]; + if options.sources(si).type==0 + gi = find(si==gsi) ; + sigmaHat = posterior.a_sigma(gi)./posterior.b_sigma(gi); + iQyt = iQy{t,gi}; + iQyt=iQyt(is_obs_out,is_obs_out); + end + + [ddydphi_t,d2gdx2_t,logL_t,dy(idx_obs,t),dy2_t,vy(idx_obs,t)] = VBA_get_dL(gx(idx_obs,t),dG_dPhi(:,idx_obs),y(idx_obs,t),options.sources(si).type,iQyt,sigmaHat); + + % aggregate + ddydphi = ddydphi + ddydphi_t; + d2gdx2 = d2gdx2 + d2gdx2_t; + dy2(si) = dy2(si) + dy2_t; + logL(si) = logL(si) + logL_t; + + if options.sources(si).type==0 + V = dG_dPhi(:,idx_obs)'*posterior.SigmaPhi*dG_dPhi(:,idx_obs) ; + if dim.n > 0 + V = V + dG_dX(:,idx_obs)'*posterior.SigmaX.current{t}*dG_dX(:,idx_obs); + end + vy(idx_obs,t) = vy(idx_obs,t) + diag(V); + end + + end + + + + end + + end + + % Display progress + if mod(t,dim.n_t./10) < 1 + if options.DisplayWin + set(options.display.hm(2),'string',[num2str(floor(100*t/dim.n_t)),'%']); + drawnow + end + end + + + + % Accelerate divergent update + if VBA_isWeird ({dy2, dG_dPhi, dG_dX}) + div = 1; + break + end + + +end + + +% Display progress +if options.DisplayWin + set(options.display.hm(2),'string','OK'); + drawnow +end + +% posterior covariance matrix + +iSigmaPhi = iQ + d2gdx2(indIn,indIn); +SigmaPhi = VBA_inv(iSigmaPhi); + +% mode +tmp = iQ*dphi0(indIn) + ddydphi(indIn); +deltaMuPhi = SigmaPhi*tmp; + +% variational energy +Iphi = -0.5.*dphi0(indIn)'*iQ*dphi0(indIn) + sum(logL); +if VBA_isWeird ({Iphi, SigmaPhi}) || div + Iphi = -Inf; +end + +% update sufficient statistics +suffStat.Iphi = Iphi; +suffStat.gx = gx; +suffStat.dy = dy; +suffStat.dy2 = dy2; +suffStat.logL = logL; +suffStat.vy = vy; +suffStat.dphi = dphi0; +if isequal(options.g_fname,@VBA_odeLim) + suffStat.muX = muX; + suffStat.SigmaX = SigmaX; +end +suffStat.div = div; + +end + + + diff --git a/VBA_Iphi_split.m b/core/VBA_Iphi_split.m similarity index 95% rename from VBA_Iphi_split.m rename to core/VBA_Iphi_split.m index 1d6bdfad..3df7afef 
100644 --- a/VBA_Iphi_split.m +++ b/core/VBA_Iphi_split.m @@ -42,7 +42,7 @@ % Preallocate intermediate variables iQy = options.priors.iQy; Q = options.priors.SigmaPhi(indIn,indIn); -iQ = VBA_inv(Q,[]); +iQ = VBA_inv(Q); muPhi0 = options.priors.muPhi; Phi = muPhi0; Phi(indIn) = phi; @@ -55,7 +55,7 @@ div = 0; % intermediary variables: MoG split -sqrtS = VBA_getISqrtMat(posterior.SigmaPhi(indIn,indIn),0); +sqrtS = VBA_sqrtm (posterior.SigmaPhi(indIn,indIn)); split = options.split; nd = size(split.m,2); Mu0 = muPhi0; @@ -99,7 +99,7 @@ ddydphi = ddydphi + split.w(i).*dG_dPhi*iQy{t}*dyti; % Predictive density (data space) - Vy{i} = dG_dPhi'*Sigma0*dG_dPhi + (1./sigmaHat).*VBA_inv(iQy{t},[]); + Vy{i} = dG_dPhi'*Sigma0*dG_dPhi + (1./sigmaHat).*VBA_inv(iQy{t}); if dim.n > 0 Vy{i} = Vy{i} + dG_dX'*posterior.SigmaX.current{t}*dG_dX; end @@ -136,7 +136,7 @@ end % Accelerate divergent update - if isweird(dy2) || isweird(dG_dPhi) || isweird(dG_dX) + if VBA_isWeird ({dy2, dG_dPhi, dG_dX}) div = 1; break end @@ -156,7 +156,7 @@ % posterior covariance matrix iSigmaPhi = iQ + sigmaHat.*d2gdx2(indIn,indIn); -SigmaPhi = VBA_inv(iSigmaPhi,[]);%./split.s(1); +SigmaPhi = VBA_inv(iSigmaPhi);%./split.s(1); % mode Mu0 = muPhi0; @@ -170,7 +170,7 @@ % variational energy Iphi = -0.5.*dphi0(indIn)'*iQ*dphi0(indIn) -0.5*sigmaHat.*dy2; -if isweird(Iphi) || isweird(SigmaPhi) || div +if VBA_isWeird ({Iphi, SigmaPhi}) || div Iphi = -Inf; end diff --git a/VBA_Itheta.m b/core/VBA_Itheta.m similarity index 94% rename from VBA_Itheta.m rename to core/VBA_Itheta.m index 4db6922b..b3bd248b 100644 --- a/VBA_Itheta.m +++ b/core/VBA_Itheta.m @@ -18,7 +18,7 @@ % Preallocate intermediate variables iQx = options.priors.iQx; Q = options.priors.SigmaTheta(indIn,indIn); -iQ = VBA_inv(Q,[]); +iQ = VBA_inv(Q); muTheta0 = options.priors.muTheta; Theta = muTheta0; Theta(indIn) = theta; @@ -66,7 +66,7 @@ end % Accelerate divergent update - if isweird({dx2,dF_dX,dF_dTheta}) + if VBA_isWeird ({dx2, dF_dX, dF_dTheta}) div = 1; break end @@ -80,7 +80,7 @@ % posterior covariance matrix iSigmaTheta = iQ + alphaHat.*d2fdx2(indIn,indIn); -SigmaTheta = VBA_inv(iSigmaTheta,[]); +SigmaTheta = VBA_inv(iSigmaTheta); % mode tmp = iQ*dtheta0(indIn) + alphaHat.*ddxdtheta(indIn); @@ -88,7 +88,7 @@ % variational energy Itheta = -0.5.*dtheta0(indIn)'*iQ*dtheta0(indIn) -0.5*alphaHat.*dx2; -if isweird({Itheta,SigmaTheta}) || div +if VBA_isWeird ({Itheta, SigmaTheta}) || div Itheta = -Inf; end diff --git a/VBA_VarParam.m b/core/VBA_VarParam.m similarity index 100% rename from VBA_VarParam.m rename to core/VBA_VarParam.m diff --git a/VBA_check.m b/core/VBA_check.m similarity index 90% rename from VBA_check.m rename to core/VBA_check.m index 554b26af..cf74d9cc 100644 --- a/VBA_check.m +++ b/core/VBA_check.m @@ -14,7 +14,13 @@ %% ________________________________________________________________________ % check model dimension - +% set default if static model +if isempty(f_fname) + dim = VBA_check_struct(dim, ... + 'n_theta', 0, ... + 'n', 0); +end + % check if hidden state dimension: assert( ... isfield(dim,'n') ... exists, @@ -55,6 +61,13 @@ %% ________________________________________________________________________ % check VBA's options structure +if isfield(options,'binomial') % check for legacy binomial field + options.sources = struct('type', options.binomial , ... + 'out' , 1:dim.p ); + options = rmfield(options, 'binomial'); + fprintf('*** WARNING: options.binomial is now deprecated. 
Please use ''options.sources.type = 1'' to specify a binary observation distribution.\n'); +end + % set defaults options = VBA_check_struct(options, ... 'decim' , 1 , ... % Micro-time resolution @@ -78,21 +91,31 @@ 'verbose' , 1 , ... % matlab window messages 'OnLine' , 0 , ... % On-line version (true when called from VBA_OnLineWrapper.m) 'delays' , [] , ... % delays - 'binomial' , 0 , ... % not binomial data 'kernelSize' , 16 , ... % max lag of Volterra kernels 'nmog' , 1 , ... % split-Laplace VB? 'UNL' , 0 , ... % un-normalized likelihood? 'UNL_width' , 4 , ... % for partition function estimation 'UNL_ng' , 64 ... % for partition function estimation ) ; - + options = VBA_check_struct(options, ... - 'isYout' , zeros(dim.p,dim.n_t) , ... % excluded data - 'skipf' , zeros(1,dim.n_t) , ... % steady evolution - 'sources' , struct('type', options.binomial , ... % multisource - 'out' , 1:dim.p ) ... -) ; + 'isYout' , zeros(dim.p,dim.n_t) , ... % excluded data + 'skipf' , zeros(1,dim.n_t) , ... % steady evolution + 'sources' , struct() ... +) ; +options.sources = VBA_check_struct(options.sources, ... + 'type', 0 , ... % default to gaussian + 'out' , 1:dim.p ); + + +% % retrocompatibility +% if ~isfield(options,'binomial') && numel(options.sources)==1 +% options.binomial = options.sources.type; +% end + + + options.backwardLag = min([max([floor(round(options.backwardLag)),1]),dim.n_t]); options.kernelSize = min([dim.n_t,options.kernelSize]); @@ -101,7 +124,7 @@ if ~isempty(y) isYoutSource = options.isYout(options.sources(i).out,:); ySource = y(options.sources(i).out,:); - if ~isbinary(ySource(~isYoutSource)) + if ~ VBA_isBinary (ySource(~ isYoutSource)) error('*** Data should be binary!') end if islogical(ySource(~isYoutSource)) @@ -130,10 +153,6 @@ assert(size(priors.muTheta,1)==dim.n_theta,'*** Dimension of options.priors.muTheta does not match dim.n_theta!') assert(isequal(size(priors.SigmaTheta),dim.n_theta*[1,1]),'*** Dimension of options.priors.SigmaTheta does not match dim.n_theta!') -% TODO remove the two tests below as they should be already taken care of -% if options.binomial -% priors = rmfield(priors,{'a_sigma','b_sigma'}); -% end if dim.n>0 && isempty(options.params2update.x0) options.updateX0 = 0; end @@ -151,7 +170,9 @@ end end -% ensure excluded data consistency (gaussian sources) +% % ensure excluded data consistency (gaussian sources) +% TODO: remove once VBA_Iphi deal with isYout +if numel(options.sources) == 1 gsi = find([options.sources.type]==0); for i=1:numel(gsi) for t=1:dim.n_t @@ -160,6 +181,7 @@ priors.iQy{t,i} = diag(diQ)*priors.iQy{t,i}*diag(diQ); end end +end % store evolution/observation function handles options.f_fname = f_fname; diff --git a/VBA_check4DCM.m b/core/VBA_check4DCM.m similarity index 100% rename from VBA_check4DCM.m rename to core/VBA_check4DCM.m diff --git a/VBA_check_errors.m b/core/VBA_check_errors.m similarity index 96% rename from VBA_check_errors.m rename to core/VBA_check_errors.m index 002f3f0c..d21c8df0 100644 --- a/VBA_check_errors.m +++ b/core/VBA_check_errors.m @@ -39,7 +39,7 @@ % evaluate observation function at current mode try [gx(:,t),dG_dX,dG_dPhi] = VBA_evalFun('g',posterior.muX(:,t),Phi,u(:,t),options,dim,t); - if isweird(gx(find(options.isYout(:,t)==0),t)) + if VBA_isWeird (gx(find (options.isYout(:, t) == 0), t)) VBA_disp('',options) VBA_disp('Error: could not initialize VB scheme: model generates NaN or Inf!',options) posterior = []; @@ -127,7 +127,7 @@ end % Accelerate divergent update - if 
isweird({dy(find(options.isYout(:,t)==0),t),dG_dPhi(:,find(options.isYout(:,t)==0)),dG_dX}) + if VBA_isWeird ({dy(find(options.isYout(:,t)==0),t), dG_dPhi(:,find(options.isYout(:,t)==0)), dG_dX}) div = 1; VBA_disp(' ',options) VBA_disp('Error: Output of evolution or observation function is weird (nan or inf) ...',options) diff --git a/core/VBA_disp.m b/core/VBA_disp.m new file mode 100644 index 00000000..3f61a5d6 --- /dev/null +++ b/core/VBA_disp.m @@ -0,0 +1,36 @@ +function VBA_disp (str, options, carriageReturn) +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% VBA_disp (str, options) +% display string or cell-arry of strings according to verbose option +% +% IN: +% - str: string or cell array of strings to display +% - option structure containing the verbose flag (default: true) +% +% ///////////////////////////////////////////////////////////////////////// + +% get flag +try + verbose = options.verbose; +catch + verbose = true; +end + +if nargin < 3 + carriageReturn = true; +end + +% conditional display function +if verbose + if iscell (str) + for i = 1 : numel(str) + fprintf(1, str{i}); + end + else + fprintf (1, str); + end + if carriageReturn + fprintf ('\n'); + end +end \ No newline at end of file diff --git a/VBA_evalFun.m b/core/VBA_evalFun.m similarity index 87% rename from VBA_evalFun.m rename to core/VBA_evalFun.m index 163ef168..9ed0b311 100644 --- a/VBA_evalFun.m +++ b/core/VBA_evalFun.m @@ -49,7 +49,7 @@ nout = options.g_nout; N = 1; if t~=0 && all(options.isYout(:,t)) && ~isequal(fname,@VBA_odeLim) - [fx] = feval(fname,Xt,P,ut,in); + [fx] = fname(Xt,P,ut,in); J = zeros([d(1),d(3)]); dfdP = zeros([d(2),d(3)]); return @@ -64,18 +64,23 @@ if options.checkGrads && ~isequal(fname,@VBA_odeLim) mayPause = 0; if ~isempty(Xt) && nout > 1 - J2 = numericDiff(@EvalFunN,2,fname,Xt,P,ut,in,dim,nout,nout0,options,d,N); + J2 = VBA_numericDiff(@EvalFunN,2,fname,Xt,P,ut,in,dim,nout,nout0,options,d,N); [hf(1)] = VBA_displayGrads(J,J2,'Jacobian',fname,flagFun); mayPause = 1; end if ~isempty(P) && nout > 2 - dfdP2 = numericDiff(@EvalFunN,3,fname,Xt,P,ut,in,dim,nout,nout0,options,d,N); + dfdP2 = VBA_numericDiff(@EvalFunN,3,fname,Xt,P,ut,in,dim,nout,nout0,options,d,N); [hf(2)] = VBA_displayGrads(dfdP,dfdP2,'Gradients wrt parameters',fname,flagFun); mayPause = 1; end if mayPause - pause + pauseState = pause('query'); + pause('on'); + pause; + pause(pauseState); + try close(setdiff(hf,0)) + end end end @@ -106,7 +111,7 @@ deriv = [1 1 1]; switch nout case 3 - [fx,dfdx,dfdp] = feval(fname,Xt,P,ut,in); + [fx,dfdx,dfdp] = fname(Xt,P,ut,in); if isempty(dfdx) deriv(1) = 0; end @@ -114,20 +119,20 @@ deriv(2) = 0; end case 2 - [fx,dfdx] = feval(fname,Xt,P,ut,in); + [fx,dfdx] = fname(Xt,P,ut,in); deriv(2) = 0; if isempty(dfdx) deriv(1) = 0; end case 1 - [fx] = feval(fname,Xt,P,ut,in); + [fx] = fname(Xt,P,ut,in); deriv(1:2) = 0; end if ~deriv(1) && dim.n>0 && nout0>=2 - dfdx = numericDiff(fname,1,Xt,P,ut,in); + dfdx = VBA_numericDiff(fname,1,Xt,P,ut,in); end if d(2) > 0 && ~deriv(2) && nout0==3 - dfdp = numericDiff(fname,2,Xt,P,ut,in); + dfdp = VBA_numericDiff(fname,2,Xt,P,ut,in); end diff --git a/VBA_fillInPriors.m b/core/VBA_fillInPriors.m similarity index 100% rename from VBA_fillInPriors.m rename to core/VBA_fillInPriors.m diff --git a/VBA_getDefaults.m b/core/VBA_getDefaults.m similarity index 96% rename from VBA_getDefaults.m rename to core/VBA_getDefaults.m index c5cd398a..ab9f6f3d 100644 --- a/VBA_getDefaults.m +++ b/core/VBA_getDefaults.m @@ -36,7 +36,7 @@ 
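A short usage sketch for the 'VBA_disp' helper added above in core/VBA_disp.m; the strings and option values are illustrative:

options.verbose = true;
VBA_disp ('inversion started', options);        % printed, followed by a newline
VBA_disp ({'part one, ', 'part two'}, options); % cell elements printed in sequence
VBA_disp ('no newline here', options, false);   % third argument suppresses the carriage return
options.verbose = false;
VBA_disp ('this message is skipped', options);  % nothing printed when verbose is false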
options.finalEval = 'Externally-specified function evaluation: end of VB algo'; options.figName = 'Name of the display figure'; options.delays = 'dim.nX1 vector for delay embedding'; -options.binomial = 'flag for binomial observations'; +options.sources = 'Type and dimension of the data distribution(s)'; diff --git a/VBA_getU.m b/core/VBA_getU.m similarity index 92% rename from VBA_getU.m rename to core/VBA_getU.m index 42a6a039..1f774496 100644 --- a/VBA_getU.m +++ b/core/VBA_getU.m @@ -17,7 +17,7 @@ nut = dim.u*options.decim; uu = zeros(nut,dim.n_t); for t=1:dim.n_t - uu(:,t) = vec(u0(:,(t-1)*options.decim+1:t*options.decim)); + uu(:,t) = VBA_vec(u0(:,(t-1)*options.decim+1:t*options.decim)); end case 'back2micro' u0 = u; diff --git a/core/VBA_get_dL.m b/core/VBA_get_dL.m new file mode 100644 index 00000000..f89e9fea --- /dev/null +++ b/core/VBA_get_dL.m @@ -0,0 +1,66 @@ +function [ddydphi, d2gdx2, logL, dy, dy2, vy]= VBA_get_dL (gx, dG_dPhi, y, type, Qy, sigmaHat) +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [ddydphi, d2gdx2, logL, dy, dy2, vy]= VBA_get_dL (gx, dG_dPhi, y, type, Qy, sigmaHat) +% Compute usefull intermediate values describing the misfit between a model +% prediction and an observation. +% +% IN: +% - gx: model prediction about the observation y (1st order moment) +% - dG_dPhi: derivative of gx wrt observation parameters +% - y: actual observation +% - type: flag defining the distribution of the observation (0: gaussian, +% 1: Bernouilli, 2: categorical) +% - Qy: scaling matrix of the 2nd order moment of the prediction (only +% gaussian observations) +% - sigmaHat: scaling factor of the 2nd order moment of the prediction +% (only gaussian observations) +% +% OUT: +% - ddydphi: gradient of the prediction error wrt observation parameters +% - d2gdx2: hessian of the prediction +% - logL: log-likelihood of the observation given the prediction +% - dy: prediction error +% - dy2: normalized squared deviation +% - vy: prediction variance +% +% ///////////////////////////////////////////////////////////////////////// + +if nargin<5 +end + +% prediction error +dy = y - gx; + +switch type + + case 0 %--- normal + vy=(1./sigmaHat).*diag(VBA_inv(Qy)); + ddydphi = sigmaHat.*(dG_dPhi*Qy*dy); + d2gdx2 = sigmaHat.*(dG_dPhi*Qy*dG_dPhi'); + dy2=dy'*Qy*dy ; + logL = - 0.5*sigmaHat.*dy2 ; + logL = logL + 0.5*VBA_logDet(Qy*sigmaHat) - 0.5*numel(dy2)*log(2*pi) ; + + case 1 %--- binomial + gx = VBA_finiteBinomial (gx); + vy = gx.*(1-gx) ; + ddydphi = dG_dPhi*(dy./vy); + temp = y./(gx).^2 - (y-1)./(1-gx).^2; + d2gdx2 = dG_dPhi*diag(temp)*dG_dPhi'; + logL = y'*log(gx) + (1-y)'*log(1-gx); + dy2 = sum(temp); + + case 2 %--- multinomial + gx = VBA_finiteBinomial (gx); + vy = gx.*(1-gx) ; + ddydphi = dG_dPhi*(y./gx); + d2gdx2 = dG_dPhi*diag(y./gx.^2)*dG_dPhi'; + dy2 = sum(y./(gx).^2); + logL = log(gx)'*y; + +end + + + +end \ No newline at end of file diff --git a/VBA_microTime.m b/core/VBA_microTime.m similarity index 91% rename from VBA_microTime.m rename to core/VBA_microTime.m index 391e38b6..3e33714d 100644 --- a/VBA_microTime.m +++ b/core/VBA_microTime.m @@ -57,14 +57,14 @@ if options.skipf(t) x(:,indT) = f_Id(x(:,indT-1),theta,u(:,tu),[]); else - x(:,indT) = feval(options.f_fname,x(:,indT-1),theta,u(:,tu),options.inF); + x(:,indT) = options.f_fname(x(:,indT-1),theta,u(:,tu),options.inF); end if isfield(out,'suffStat') && isfield(out.suffStat,'dx') && ... 
~isempty(out.suffStat.dx) && isequal(i,options.decim) % add state noise to evolution mapping x(:,indT) = x(:,indT) + out.suffStat.dx(:,t); end - [gx(:,indT)] = feval(options.g_fname,x(:,indT),phi,u(:,tu),options.inG); + [gx(:,indT)] = options.g_fname(x(:,indT),phi,u(:,tu),options.inG); end end diff --git a/subfunctions/VBA_multisession_expand.m b/core/VBA_multisession_expand.m similarity index 98% rename from subfunctions/VBA_multisession_expand.m rename to core/VBA_multisession_expand.m index d06b72b7..ac397d5c 100644 --- a/subfunctions/VBA_multisession_expand.m +++ b/core/VBA_multisession_expand.m @@ -51,6 +51,7 @@ %% duplicate parameters +options = VBA_fillInPriors (dim, options); priors_multi = options.priors; % = get indexes of duplicated parameters @@ -147,7 +148,7 @@ end % call original function nout = nargout(multisession.f_fname); - [output{1:nout}] = feval(multisession.f_fname, ... + [output{1:nout}] = multisession.f_fname( ... Xt(idx_X0), ... Theta(idx_theta), ... ut(1:end-1),... @@ -189,7 +190,7 @@ % call original function nout = nargout(multisession.g_fname); - [output{1:nout}] = feval(multisession.g_fname, ... + [output{1:nout}] = multisession.g_fname( ... Xt(idx_X0), ... Phi(idx_phi), ... ut(1:end-1),... diff --git a/subfunctions/VBA_multisession_factor.m b/core/VBA_multisession_factor.m similarity index 100% rename from subfunctions/VBA_multisession_factor.m rename to core/VBA_multisession_factor.m diff --git a/VBA_odeLim.m b/core/VBA_odeLim.m similarity index 100% rename from VBA_odeLim.m rename to core/VBA_odeLim.m diff --git a/VBA_odeLim2NLSS.m b/core/VBA_odeLim2NLSS.m similarity index 100% rename from VBA_odeLim2NLSS.m rename to core/VBA_odeLim2NLSS.m diff --git a/VBA_onlineWrapper.m b/core/VBA_onlineWrapper.m similarity index 91% rename from VBA_onlineWrapper.m rename to core/VBA_onlineWrapper.m index 8671be87..6c03a0e7 100644 --- a/VBA_onlineWrapper.m +++ b/core/VBA_onlineWrapper.m @@ -9,6 +9,7 @@ % variables. See VBA_NLStateSpaceModel.m for I/O arguments. % NB: this online wrapper does not deal with ODE state-space models. 
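Several hunks above (e.g. in VBA_microTime.m and VBA_multisession_expand.m) replace 'feval(fname, ...)' with a direct call through the function handle. A minimal sketch of the equivalence, with an illustrative observation function:

g_fname = @(x, phi, u, in) phi(1) .* x + phi(2); % illustrative handle
x = 1; phi = [2; 3]; u = []; in = [];
gx_old = feval (g_fname, x, phi, u, in); % legacy style
gx_new = g_fname (x, phi, u, in);        % style adopted by this patch
assert (isequal (gx_old, gx_new));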
+ tStart = tic; %------------------ Check input consistency ---------------% @@ -21,6 +22,13 @@ options.OnLine = 1; [options,u,dim] = VBA_check(y,u,f_fname,g_fname,dim,options); +if numel(options.sources) > 1 + error('*** online inversion is not yet compatible with multisource observations'); +end + +if options.sources.type > 1 + error('*** online inversion is not yet compatible with multinomial observations'); +end %------------------- Initialize variables ------------------% [suffStat] = VBA_getSuffStat(options,[],1); @@ -37,7 +45,7 @@ end posterior.a_alpha = zeros(1,dim.n_t); posterior.b_alpha = zeros(1,dim.n_t); -if ~options.binomial +if options.sources.type == 0 posterior.a_sigma = zeros(1,dim.n_t); posterior.b_sigma = zeros(1,dim.n_t); end @@ -78,6 +86,7 @@ OL_options.updateX0 = 1; OL_options.GnFigs = 0; OL_options.DisplayWin = 0; +OL_options.isYout = options.isYout(:,1); OL_dim.n_t=1; OL_y = y(:,1); OL_u = u(:,1); @@ -150,15 +159,17 @@ OL_posterior,OL_out,posterior,suffStat,t,options); % update display - VBA_updateDisplay(posterior,suffStat,options,y(:,1:t),t,'precisions') + options_OL = options; + options_OL.isYout = options_OL.isYout(:,1:t); + VBA_updateDisplay(posterior,suffStat,options_OL,y(:,1:t),t,'precisions') if dim.n_phi > 0 - VBA_updateDisplay(posterior,suffStat,options,y(:,1:t),t,'phi') + VBA_updateDisplay(posterior,suffStat,options_OL,y(:,1:t),t,'phi') end if dim.n > 0 - VBA_updateDisplay(posterior,suffStat,options,y(:,1:t),t,'X') + VBA_updateDisplay(posterior,suffStat,options_OL,y(:,1:t),t,'X') end if dim.n_theta > 0 - VBA_updateDisplay(posterior,suffStat,options,y(:,1:t),t,'theta') + VBA_updateDisplay(posterior,suffStat,options_OL,y(:,1:t),t,'theta') end end @@ -235,7 +246,7 @@ posterior.a_alpha(t) = []; posterior.b_alpha(t) = []; end -if ~options.binomial +if options.sources.type == 0 try posterior.a_sigma(t) = OL_posterior.a_sigma; posterior.b_sigma(t) = OL_posterior.b_sigma; @@ -250,7 +261,7 @@ suffStat.dx(:,t) = OL_out.suffStat.dx; suffStat.dy(:,t) = OL_out.suffStat.dy; suffStat.Salpha = OL_out.suffStat.Salpha; -if ~options.binomial +if options.sources.type == 0 suffStat.Ssigma = OL_out.suffStat.Ssigma; else suffStat.logL = OL_out.suffStat.logL; diff --git a/VBA_priors.m b/core/VBA_priors.m similarity index 96% rename from VBA_priors.m rename to core/VBA_priors.m index fd82b3db..8cd79d3a 100644 --- a/VBA_priors.m +++ b/core/VBA_priors.m @@ -25,7 +25,7 @@ % check for sources for backward compatibility if ~isfield(options,'sources') - options.sources = struct('out',1:dim.p,'type',options.binomial); + error('Please specify the observation distribution using options.sources.\n'); end % prior Gamma pdf of the measurement noise (Jeffrey) diff --git a/VBA_wrapup.m b/core/VBA_wrapup.m similarity index 98% rename from VBA_wrapup.m rename to core/VBA_wrapup.m index c872f2fb..295b5636 100644 --- a/VBA_wrapup.m +++ b/core/VBA_wrapup.m @@ -26,7 +26,7 @@ out.suffStat = suffStat; out.date = clock; out.dt = toc(options.tStart); -out.fit = VBA_fit(posterior,out); +out.fit = VBA_getFit(posterior,out); if fromPause return end diff --git a/VBA_getDiagnostics.m b/core/diagnostics/VBA_getDiagnostics.m similarity index 98% rename from VBA_getDiagnostics.m rename to core/diagnostics/VBA_getDiagnostics.m index 5b6ca402..cdf37af2 100644 --- a/VBA_getDiagnostics.m +++ b/core/diagnostics/VBA_getDiagnostics.m @@ -25,12 +25,12 @@ y = out.y; % get goodness-tof-fit metrics -try; out.fit; catch; out.fit = VBA_fit(posterior,out); end +try; out.fit; catch; out.fit = VBA_getFit(posterior,out); end % 
derive Volterra kernels if out.dim.n_t>1 && out.options.kernelSize>0 try - kernels = VBA_VolterraKernels(posterior,out); + kernels = VBA_getVolterraKernels(posterior,out); catch VBA_disp(' *** could not derive kernels!\n',out.options); kernels = []; @@ -241,7 +241,7 @@ S(ind+1:ind+out.dim.n,ind+1:ind+out.dim.n) = SP; end end -C = cov2corr(S); +C = VBA_cov2corr(S); C = C + diag(NaN.*diag(C)); tick = [0]; ltick = []; @@ -290,7 +290,7 @@ % weigths residuals according to (state/data) precision matrix wdx = zeros(size(dx)); for t = 1:size(dx,2) - sqrtiQ = VBA_getISqrtMat(iQx{t},0); + sqrtiQ = VBA_sqrtm (iQx{t}); wdx(:,t) = sqrtiQ*dx(:,t); end diff --git a/VBA_fit.m b/core/diagnostics/VBA_getFit.m similarity index 65% rename from VBA_fit.m rename to core/diagnostics/VBA_getFit.m index 96801257..a443ffbf 100644 --- a/VBA_fit.m +++ b/core/diagnostics/VBA_getFit.m @@ -1,6 +1,6 @@ -function fit = VBA_fit(posterior,out) +function fit = VBA_getFit(posterior,out) % derives standard model fit accuracy metrics -% function fit = VBA_fit(posterior,out) +% function fit = VBA_getFit(posterior,out) % IN: % - posterior/out: output structures of VBA_NLStateSpaceModel.m % OUT: @@ -47,7 +47,7 @@ si=gsi(i); idx = out.options.sources(si).out; % sample size - fit.ny(si) = sum(1-vec(out.options.isYout(idx,:))); + fit.ny(si) = sum(1-VBA_vec(out.options.isYout(idx,:))); % log-likelihood if out.options.UNL % to be rationalized... fit.LL = out.suffStat.logL; @@ -64,13 +64,8 @@ fit.AIC(si) = fit.LL(si) - fit.np; fit.BIC(si) = fit.LL(si) - 0.5*fit.np.*log(fit.ny(si)); % coefficient of determination - y_temp = out.y(idx,:); - y_temp = y_temp(out.options.isYout(idx,:) == 0); - gx_temp = suffStat.gx(idx,:); - gx_temp = gx_temp(out.options.isYout(idx,:) == 0); - SS_tot = sum((vec(y_temp)-mean(vec(y_temp))).^2); - SS_err = sum((vec(y_temp)-vec(gx_temp)).^2); - fit.R2(si) = 1-(SS_err/SS_tot); + fit.R2(si) = VBA_r2 (suffStat.gx(idx,:), out.y(idx,:), out.options.isYout(idx,:)); + % classification accuracies [irrelevant] fit.acc(si) = NaN; fit.bacc(si) = NaN; @@ -83,30 +78,18 @@ si=bsi(i); idx = out.options.sources(si).out; % sample size - fit.ny(si) = sum(1-vec(out.options.isYout(idx,:))); + fit.ny(si) = sum(1-VBA_vec(out.options.isYout(idx,:))); % log-likelihood fit.LL(si) = out.suffStat.logL(si); % AIC/BIC fit.AIC(si) = fit.LL(si) - fit.np; fit.BIC(si) = fit.LL(si) - 0.5*fit.np.*log(fit.ny(si)); % coefficient of determination - y_temp = out.y(idx,:); - y_temp = y_temp(out.options.isYout(idx,:) == 0); - gx_temp = suffStat.gx(idx,:); - gx_temp = gx_temp(out.options.isYout(idx,:) == 0); - SS_tot = sum((vec(y_temp)-mean(vec(y_temp))).^2); - SS_err = sum((vec(y_temp)-vec(gx_temp)).^2); - fit.R2(si) = 1-(SS_err/SS_tot); - % classification accuracies - bg = gx_temp>.5; % binarized model predictions - tp = sum(vec(y_temp).*vec(bg)); % true positives - fp = sum(vec(1-y_temp).*vec(bg)); % false positives - fn = sum(vec(y_temp).*vec(1-bg)); % false positives - tn = sum(vec(1-y_temp).*vec(1-bg)); %true negatives - P = tp + fn; - N = tn + fp; - fit.acc(si) = (tp+tn)./(P+N); - fit.bacc(si) = 0.5*(tp./P + tn./N); + fit.R2(si) = VBA_r2 (suffStat.gx(idx,:), out.y(idx,:), out.options.isYout(idx,:)); + + % classification accuracies + [fit.acc(si), fit.bacc(si)] = VBA_accuracy (suffStat.gx(idx,:), out.y(idx,:), 1, out.options.isYout(idx,:)); + end % 3- multinomial sources: goodness-of-fit @@ -121,18 +104,11 @@ % AIC/BIC fit.AIC(si) = fit.LL(si) - fit.np; fit.BIC(si) = fit.LL(si) - 0.5*fit.np.*log(fit.ny(si)); - % coefficient of determination - 
y_temp = out.y(idx,:); - y_temp = y_temp(out.options.isYout(idx,:) == 0); - gx_temp = suffStat.gx(idx,:); - gx_temp = gx_temp(out.options.isYout(idx,:) == 0); - SS_tot = sum((vec(y_temp)-mean(vec(y_temp))).^2); - SS_err = sum((vec(y_temp)-vec(gx_temp)).^2); - fit.R2(si) = 1-(SS_err/SS_tot); + % coefficient of determination + fit.R2(si) = VBA_r2 (suffStat.gx(idx,:), out.y(idx,:), out.options.isYout(idx,:)); + % classification accuracies [to be rationalized!] - fit.acc(si) = NaN; - fit.bacc(si) = NaN; -% fit.acc(si) = multinomial_accuracy(suffStat.gx(idx,:),out.y(idx,:),out.options.isYout(idx,:)); + [fit.acc(si), fit.bacc(si)] = VBA_accuracy (suffStat.gx(idx,:), out.y(idx,:), 2, out.options.isYout(idx,:)); end diff --git a/VBA_getSuffStat.m b/core/diagnostics/VBA_getSuffStat.m similarity index 96% rename from VBA_getSuffStat.m rename to core/diagnostics/VBA_getSuffStat.m index b609931e..65a6431c 100644 --- a/VBA_getSuffStat.m +++ b/core/diagnostics/VBA_getSuffStat.m @@ -10,7 +10,11 @@ if ~exist('suffStat','var'), suffStat = struct(); end dim = options.dim; -dim.online = elvis(flag, dim.n_t, 1); +if flag + dim.online = dim.n_t; +else + dim.online = 1; +end suffStat = VBA_check_struct(suffStat, ... 'F' , [] , ... diff --git a/VBA_VolterraKernels.m b/core/diagnostics/VBA_getVolterraKernels.m similarity index 88% rename from VBA_VolterraKernels.m rename to core/diagnostics/VBA_getVolterraKernels.m index 93f3eabf..de2e85f7 100644 --- a/VBA_VolterraKernels.m +++ b/core/diagnostics/VBA_getVolterraKernels.m @@ -1,6 +1,6 @@ -function [kernels] = VBA_VolterraKernels(posterior,out,nt) +function [kernels] = VBA_getVolterraKernels(posterior,out,nt) % Estimation of the system's 1st-order Volterra kernels -% function [my,vy,mg,vg,mx,vx] = VBA_VolterraKernels(posterior,out) +% function [my,vy,mg,vg,mx,vx] = VBA_getVolterraKernels(posterior,out) % IN: % - posterior,out: the output of the VBA system inversion. Note that the % Volterra kernels are estimated given the input stored in out.u. 
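As a quick usage sketch for the renamed diagnostics helpers above (formerly VBA_fit and VBA_VolterraKernels), assuming 'posterior' and 'out' were returned by VBA_NLStateSpaceModel:

fit = VBA_getFit (posterior, out);                 % per-source LL, AIC, BIC and R2
kernels = VBA_getVolterraKernels (posterior, out); % 1st-order Volterra kernels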
@@ -40,7 +40,7 @@ else % do not change input u = out.u(:,1:out.dim.n_t); end -if isweird(u) +if VBA_isWeird (u) VBA_disp('Warning: zero-padding weird inputs for Volterra decompositions.',out.options) i0 = isinf(u) | isnan(u) | ~isreal(u); u(i0) = 0; @@ -51,19 +51,12 @@ end % configurate kernel estimation -if out.options.binomial - g_fname = @g_convSig; - opt.binomial = 1; -else - g_fname = @g_conv0; - opt.binomial = 0; -end [opt.inG.dgdp] = VBA_conv2glm(u,nt); % build convolution matrices if isfield(out.options,'detrendU') && ~~out.options.detrendU VBA_disp('Warning: detrending inputs for Volterra decompositions.',out.options) Trend = []; for i=0:out.options.detrendU - Trend = [Trend,vec(1:out.dim.n_t).^i]; + Trend = [Trend,VBA_vec(1:out.dim.n_t).^i]; end Trend = VBA_orth(Trend,1); opt.inG.dgdp = [opt.inG.dgdp;Trend(:,2:out.options.detrendU+1)']; @@ -89,10 +82,21 @@ for k = 1:p y = out.y(k,:)'; if var(y)>eps % only if var(y)>0 - if ~opt.binomial - opt.priors.a_sigma = 1; - opt.priors.b_sigma = var(y); + + % find source tpye + sInd = cellfun(@(x) ismember(k,x), {out.options.sources.out}); + switch out.options.sources(sInd).type + case 0 + g_fname = @g_conv0; + opt.sources.type = 0; + opt.priors.a_sigma = 1; + opt.priors.b_sigma = var(y); + case {1,2} + g_fname = @g_convSig; + opt.sources.type = 1; end + + [pk,ok] = VBA_NLStateSpaceModel(y,[],[],g_fname,dim,opt); kernels.y.R2(k) = ok.fit.R2; if out.options.verbose @@ -114,7 +118,7 @@ end % 2- Volterra kernels of simulated system -opt.binomial = 0; +opt.sources.type = 0; kernels.g.m = zeros(p,nt,nu); kernels.g.v = zeros(p,nt,nu); kernels.g.R2 = zeros(p,1); diff --git a/stats&plots/MoveAxisToOrigin.m b/core/display/MoveAxisToOrigin.m similarity index 100% rename from stats&plots/MoveAxisToOrigin.m rename to core/display/MoveAxisToOrigin.m diff --git a/stats&plots/Plot3AxisAtOrigin.m b/core/display/Plot3AxisAtOrigin.m similarity index 100% rename from stats&plots/Plot3AxisAtOrigin.m rename to core/display/Plot3AxisAtOrigin.m diff --git a/stats&plots/PlotAxisAtOrigin.m b/core/display/PlotAxisAtOrigin.m similarity index 100% rename from stats&plots/PlotAxisAtOrigin.m rename to core/display/PlotAxisAtOrigin.m diff --git a/VBA_Bin2Cont.m b/core/display/VBA_Bin2Cont.m similarity index 100% rename from VBA_Bin2Cont.m rename to core/display/VBA_Bin2Cont.m diff --git a/VBA_classification_display.m b/core/display/VBA_classification_display.m similarity index 99% rename from VBA_classification_display.m rename to core/display/VBA_classification_display.m index 9fb7a586..29dc5b7a 100644 --- a/VBA_classification_display.m +++ b/core/display/VBA_classification_display.m @@ -149,7 +149,7 @@ set(all.handles.hf,'userdata',all) -try,getSubplots;end +try, VBA_getSubplots (); end diff --git a/VBA_displayGrads.m b/core/display/VBA_displayGrads.m similarity index 96% rename from VBA_displayGrads.m rename to core/display/VBA_displayGrads.m index 63615eab..a12fb8b4 100644 --- a/VBA_displayGrads.m +++ b/core/display/VBA_displayGrads.m @@ -55,11 +55,11 @@ maj = max([J(:);J2(:)]); imagesc(J,'parent',ha),colorbar xlabel(ha,'analytic') - set(ha,'clim',[mij,maj]) + set(ha,'clim',[mij,maj+eps]) ha = subplot(2,2,4,'parent',hf(1)); imagesc(J2,'parent',ha),colorbar xlabel(ha,'numerical') - set(ha,'clim',[mij,maj]) + set(ha,'clim',[mij,maj+eps]) end try; getsubplots; end diff --git a/VBA_displayGroupBMC.m b/core/display/VBA_displayGroupBMC.m similarity index 99% rename from VBA_displayGroupBMC.m rename to core/display/VBA_displayGroupBMC.m index e21bda54..18825f3b 100644 --- 
a/VBA_displayGroupBMC.m +++ b/core/display/VBA_displayGroupBMC.m @@ -164,7 +164,7 @@ end drawnow -try;getSubplots;end +try; VBA_getSubplots (); end diff --git a/VBA_displayGroupBMCbtw.m b/core/display/VBA_displayGroupBMCbtw.m similarity index 95% rename from VBA_displayGroupBMCbtw.m rename to core/display/VBA_displayGroupBMCbtw.m index e3dd2746..438b693c 100644 --- a/VBA_displayGroupBMCbtw.m +++ b/core/display/VBA_displayGroupBMCbtw.m @@ -36,7 +36,7 @@ for n=1:ns for i=1:nc for j=1:nc - tmp = corrcoef(vec(out.L(:,n,i)),vec(out.L(:,n,j))); + tmp = corrcoef(VBA_vec(out.L(:,n,i)),VBA_vec(out.L(:,n,j))); C(i,j,n) = tmp(2,1); end end @@ -60,7 +60,7 @@ tmp = []; for i=1:nf indf = out.options.families{i}; - tmp = [tmp;vec(indf)]; + tmp = [tmp;VBA_vec(indf)]; C(indf,i) = 1; end C = ~C; % for display purposes (black => model belongs to the family) @@ -76,7 +76,7 @@ % display per-condition family-BMS C = zeros(nf,nc); for i=1:nc - C(:,i) = vec(out.VBA.cond(i).out.families.ep); + C(:,i) = VBA_vec(out.VBA.cond(i).out.families.ep); end handles.ha(5) = subplot(3,2,5,'parent',handles.hf,'nextplot','add'); hi = imagesc(C','parent',handles.ha(5)); @@ -93,7 +93,7 @@ % display per-condition BMS C = zeros(nm,nc); for i=1:nc - C(:,i) = vec(out.VBA.cond(i).out.ep); + C(:,i) = VBA_vec(out.VBA.cond(i).out.ep); end handles.ha(5) = subplot(3,2,5,'parent',handles.hf,'nextplot','add'); hi = imagesc(C','parent',handles.ha(5)); @@ -136,7 +136,7 @@ VBA_title(handles.ha(4),'design') set(handles.ha(4),'xlim',[0.5,nc+0.5],'xtick',[1:nc],'ylim',[0.5,nf+0.5],'ytick',[1:nf]) handles.hc(end+1) = colorbar('peer',handles.ha(4)); - set(handles.hc(end),'ytick',1:max(vec(C))) + set(handles.hc(end),'ytick',1:max(VBA_vec(C))) drawnow % display pEP of per-factor btw-condition stability @@ -175,7 +175,7 @@ end -try;getSubplots;end +try;VBA_getSubplots ();end diff --git a/VBA_displayMFX.m b/core/display/VBA_displayMFX.m similarity index 99% rename from VBA_displayMFX.m rename to core/display/VBA_displayMFX.m index f0940fd4..7d2fcb09 100644 --- a/VBA_displayMFX.m +++ b/core/display/VBA_displayMFX.m @@ -196,7 +196,7 @@ set(display.ha(4,1),'xtick',[0,o_group.it],'xticklabel',xl,'box','off'); end -getSubplots +VBA_getSubplots (); function [] = VBAMFX_displayWithin(i1,i2) diff --git a/core/display/VBA_figure.m b/core/display/VBA_figure.m new file mode 100644 index 00000000..6a1d5afe --- /dev/null +++ b/core/display/VBA_figure.m @@ -0,0 +1,17 @@ +function h = VBA_figure (varargin) +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% h = VBA_figure (varargin) +% surcharge of the figure function with some styling +% +% IN: optional arguments for the call to figure() +% OUT: handle to the create figure +% +% ///////////////////////////////////////////////////////////////////////// + +options = [{ ... + 'Color', 'white', ... + 'MenuBar', 'none' ... + }, varargin]; + +h = figure (options{:}); \ No newline at end of file diff --git a/VBA_initDisplay.m b/core/display/VBA_initDisplay.m similarity index 98% rename from VBA_initDisplay.m rename to core/display/VBA_initDisplay.m index d0993a44..75ff6045 100644 --- a/VBA_initDisplay.m +++ b/core/display/VBA_initDisplay.m @@ -81,7 +81,7 @@ % Create axes for measured and predicted data if options.dim.n_t == 1 xlim = [1,options.dim.p]; - xl = 'data dimensions'; + xl = 'data dimension'; else xlim = [1,options.dim.n_t]; xl = 'time'; @@ -196,6 +196,10 @@ styleLabel(ylabel(h, ... sprintf('',data_conditioner))); end +if (options.dim.n_t == 1) + styleLabel(xlabel(h, ... 
+ sprintf('state dimension'))); +end display.ha(3) = h; % 4) Initial state @@ -360,7 +364,7 @@ display.ho = uicontrol('parent',display.hfp,'style','text','tag','VBLaplace','units','normalized','position',[0.20 0.010 0.60 0.02],'backgroundcolor',[1,1,1]); display.hm(1) = uicontrol('parent',display.hfp,'style','text','tag','VBLaplace','units','normalized','position',[0.28 0.035 0.40 0.02],'backgroundcolor',[1,1,1]); display.hm(2) = uicontrol('parent',display.hfp,'style','text','tag','VBLaplace','units','normalized','position',[0.68 0.035 0.10 0.02],'backgroundcolor',[1,1,1]); -display.htt(1) = uicontrol('parent',display.hfp,'style','text','tag','VBLaplace','units','normalized','position',[0.75 0.970 0.25 0.02],'backgroundcolor',[1,1,1]); +display.htt(1) = uicontrol('parent',display.hfp,'style','text','tag','VBLaplace','units','normalized','position',[0.70 0.965 0.30 0.02],'backgroundcolor',[1,1,1]); % Create 'pause' uicontrol button if ~isfield(options,'noPause') || ~options.noPause @@ -373,7 +377,7 @@ %% actually display drawnow try - getSubplots + VBA_getSubplots (); end %% save handles and options diff --git a/VBA_pause.m b/core/display/VBA_pause.m similarity index 80% rename from VBA_pause.m rename to core/display/VBA_pause.m index 4e9d1a8f..692f11fd 100644 --- a/VBA_pause.m +++ b/core/display/VBA_pause.m @@ -2,14 +2,13 @@ % used to pause the VB inversion from the GUI interactively % NB: when paused, the inversion allows interactive diagnosis... +if ~ options.DisplayWin + return; +end try dt = toc(options.tStart); - if floor(dt./60) == 0 - timeString = [num2str(floor(dt)),' sec']; - else - timeString = [num2str(floor(dt./60)),' min']; - end - set(options.display.htt,'string',['Elapsed time: ',timeString]) + timeString = sprintf('Elapsed time: %d min and %d sec', floor(dt/60), round(rem(dt,60))); + set(options.display.htt, 'string', timeString); end try @@ -35,7 +34,10 @@ end while ~stop pause(2) - if ~get(hpause,'value') + if ~ ishandle(hfp) + set(hpause, 'value', 0); + end + if ~ get(hpause,'value') stop = 1; set(hpause,'string','pause and diagnose?',... 'backgroundColor',0.8*[1 1 1]) diff --git a/VBA_summary.m b/core/display/VBA_summary.m similarity index 95% rename from VBA_summary.m rename to core/display/VBA_summary.m index 888e15d6..3a881430 100644 --- a/VBA_summary.m +++ b/core/display/VBA_summary.m @@ -41,10 +41,17 @@ ' - evolution parameters: n_theta=',num2str(out.dim.n_theta),'\n ',... ' - observation parameters: n_phi=',num2str(out.dim.n_phi),'\n ',... ' - inputs: n_u=',num2str(out.dim.u)]); -if out.options.binomial - tmp = ' (binomial data)'; +if numel(out.options.sources)>1 + tmp = ' (multisource)'; else - tmp = []; + switch out.options.sources.type + case 0 + tmp = ' (gaussian data)'; + case 1 + tmp = ' (binomial data)'; + case 2 + tmp = ' (multinomial data)'; + end end if out.options.UNL so = 'un-normalized likelihood'; diff --git a/VBA_summaryMFX.m b/core/display/VBA_summaryMFX.m similarity index 64% rename from VBA_summaryMFX.m rename to core/display/VBA_summaryMFX.m index 4d10d820..7f8f1b70 100644 --- a/VBA_summaryMFX.m +++ b/core/display/VBA_summaryMFX.m @@ -24,7 +24,7 @@ ' - hidden states: n=',num2str(out.options.dim.n),'\n ',... ' - evolution parameters: n_theta=',num2str(out.options.dim.n_theta),'\n ',... 
' - observation parameters: n_phi=',num2str(out.options.dim.n_phi)]); -str{4} = sprintf(['Within-subject generative model:','\n']); +str{4} = sprintf('\n Within-subject generative model:\n'); if isa(out.options.g_fname,'function_handle') gfn = func2str(out.options.g_fname); else @@ -37,25 +37,33 @@ ffn = out.options.f_fname; end str{4} = sprintf([str{4},... - ' - observation function: ',gfn,'\n',... - ' - evolution function: ',ffn]); + ' - observation function: ',gfn,'\n',... + ' - evolution function: ',ffn]); else - str{4} = sprintf([str{4},' - observation function: ',gfn]); + str{4} = sprintf([str{4},' - observation function: ',gfn]); end mF = mean(out.within_fit.F(:)); sF = std(out.within_fit.F(:)); mF0 = mean(out.within_fit.LLH0(:)); sF0 = std(out.within_fit.LLH0(:)); str{5} = sprintf([... - ' - Bayesian log model evidences: = ',num2str(mF,'%4.3e'),' +/- ',num2str(sF,'%4.3e'),'\n',... - ' - Bayesian log evidences under the null: = ',num2str(mF0,'%4.3e'),' +/- ',num2str(sF0,'%4.3e')]); -if ~out.options.binomial + '\n - Bayesian log model evidences: = ',num2str(mF,'%4.3e'),' +/- ',num2str(sF,'%4.3e'),'\n',... + ' - Bayesian log evidences under the null: = ',num2str(mF0,'%4.3e'),' +/- ',num2str(sF0,'%4.3e')]); + +if any([out.options.sources.type]==0) R2str = 'coefficient of determination: '; -else + mR = mean(out.within_fit.R2); + sR = std(out.within_fit.R2); + str{6} = sprintf([... + '\n - ',R2str,' = ',num2str(mR,'%4.3f '),' +/- ',num2str(sR,'%4.3e ')]); +end + +if any([out.options.sources.type]>0) R2str = 'balanced classification accuracy '; + mR = mean(out.within_fit.R2); + sR = std(out.within_fit.R2); + str{7} = sprintf([... + '\n - ',R2str,' = ',num2str(mR,'%4.3f '),' +/- ',num2str(sR,'%4.3f ')]); end -mR = mean(out.within_fit.R2(:)); -sR = std(out.within_fit.R2(:)); -str{6} = sprintf([... 
- ' - ',R2str,' = ',num2str(mR,'%4.3f'),' +/- ',num2str(sR,'%4.3e')]); + diff --git a/stats&plots/VBA_title.m b/core/display/VBA_title.m similarity index 100% rename from stats&plots/VBA_title.m rename to core/display/VBA_title.m diff --git a/VBA_updateDisplay.m b/core/display/VBA_updateDisplay.m similarity index 61% rename from VBA_updateDisplay.m rename to core/display/VBA_updateDisplay.m index 42b4b292..c4226134 100644 --- a/VBA_updateDisplay.m +++ b/core/display/VBA_updateDisplay.m @@ -16,7 +16,14 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) % replace source selector callback if needed ud = get(getPanel(display.hfp),'userdata') ; -ud.update_plot = @() VBA_updateDisplay(posterior,suffStat,options,y,0,'Y'); + + function update_plot () + cla(display.ha(1)); + cla(display.ha(2)); + VBA_updateDisplay(posterior,suffStat,options,y,0,'Y'); + end + +ud.update_plot = @update_plot; set(getPanel(display.hfp),'userdata',ud) ; ud = VBA_check_struct(ud,'currentSource', 1 ); @@ -112,20 +119,48 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) vphi = VBA_getVar(posterior.SigmaPhi,indEnd); end -% check time dimension +%% Vertical time encoding if isequal(dTime,1) && size(y,1) > 1 - gx = gx'; - vy = vy'; - y = y'; - if options.dim.n > 0 + + n_s = numel(options.sources); + n_t = max(cellfun(@numel,{options.sources.out})); + + new_y = nan(n_s,n_t); + new_gx = nan(n_s,n_t); + new_vy = nan(n_s,n_t); + new_isYout = ones(n_s,n_t); + + for si=1:n_s + s_idx = options.sources(si).out; + + n_t_s = numel(options.sources(si).out); + + new_y(si, 1:n_t_s) = y(s_idx)'; + new_gx(si, 1:n_t_s) = gx(s_idx)'; + new_vy(si, 1:n_t_s) = vy(s_idx)'; + + try + new_isYout(si, 1:n_t_s) = options.isYout(s_idx)'; + catch + new_isYout(si, 1:n_t_s) = 0; + end + + end + + y = new_y; + vy = new_vy; + gx = new_gx; + options.isYout = new_isYout; + + if options.dim.n > 0 mux = mux'; vx = vx'; end - try - options.isYout = options.isYout'; - end - dTime = 1:size(y,2); - for si=1:numel(options.sources) + + dTime = 1:n_t; + + + for si=1:n_s options.sources(si).out = si; end end @@ -163,7 +198,7 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) end % update middle-left subplot: hidden states - cla(display.ha(3)) + %cla(display.ha(3)) try plotUncertainTimeSeries(mux,vx,dTime,display.ha(3),ind); catch @@ -172,7 +207,7 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) % update middle-right subplot: initial conditions if options.updateX0 - cla(display.ha(4)) + %cla(display.ha(4)) plotUncertainTimeSeries(-dx0,vx0,1,display.ha(4)); elseif isequal(it,0) plotUncertainTimeSeries(dx0,vx0,1,display.ha(4)); @@ -191,7 +226,7 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) if size(dphi,2) == 1 % for on-line wrapper dTime = 1; end - cla(display.ha(5)) + %cla(display.ha(5)) plotUncertainTimeSeries(-dphi,vphi,dTime,display.ha(5)); displayDF(F,display) @@ -202,7 +237,7 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) if size(dtheta,2) == 1 % for on-line wrapper dTime = 1; end - cla(display.ha(7)) + %cla(display.ha(7)) plotUncertainTimeSeries(-dtheta,vtheta,dTime,display.ha(7)); displayDF(F,display) @@ -216,7 +251,7 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) if (options.updateHP || isequal(it,0)) && sum([options.sources(:).type]==0)>0 dTime = 1; - cla(display.ha(6)) + %cla(display.ha(6)) logCI = (log(sigmaHat+sqrt(var_sigma)) - log(sigmaHat))'; plotUncertainTimeSeries(log(sigmaHat'),logCI.^2,dTime,display.ha(6)); end @@ 
-224,7 +259,7 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) % update middle-right subplot: state noise if options.dim.n > 0 && ~any(isinf(alphaHat)) dTime = 1; - cla(display.ha(8)) + %cla(display.ha(8)) logCI = log(alphaHat+sqrt(var_alpha)) - log(alphaHat); plotUncertainTimeSeries(log(alphaHat),logCI.^2,dTime,display.ha(8)); end @@ -251,44 +286,100 @@ function VBA_updateDisplay(posterior,suffStat,options,y,it,flag) %--- subfunction ---% function update_observation_plot() - s_out=options.sources(currentSource).out; % update top-left subplot: predictive density - cla(display.ha(1)) + %cla(display.ha(1)) y_s = y(s_out,:); y_s_on = y_s; y_s_on(options.isYout(s_out,:)==1)=nan; if options.sources(currentSource).type < 2 - p_l = plot(display.ha(1),dTime,y_s',':'); - plot(display.ha(1),dTime,y_s','.','MarkerEdgeColor',[.7 .7 .7],'MarkerSize',9); - p_mi = plot(display.ha(1),dTime,y_s_on','.','MarkerSize',9); + % excluded points + [ix,iy] = find(options.isYout(s_out,:)); + y_s_out = y_s(sub2ind(size(y_s),ix,iy)); + p_out = findobj(display.ha(1),'Tag','yOut'); + if ~ isempty(p_out) + set(p_out,'XData',dTime(iy), 'YData', y_s_out ); + else + plot(display.ha(1),dTime(iy), y_s_out, '.', 'MarkerEdgeColor',[.7 .7 .7],'MarkerSize',9,'Tag','yOut'); + end + + % data lines + p_l = findobj(display.ha(1),'Tag','yLine'); + if ~ isempty (p_l) + for i = 1 : numel (p_l) + set(p_l(i), 'XData', dTime, 'YData', y_s(i,:)); + end + else + resetColors(display.ha(1)); + p_l = plot(display.ha(1),dTime,y_s',':','Tag','yLine','MarkerSize',9); + end + + % data points + p_mi = findobj(display.ha(1),'Tag','yPoint'); + + if ~ isempty (p_mi) + for i = 1 : numel (p_mi) + set(p_mi(i), 'XData', dTime, 'YData', y_s_on(i,:)); + end + else + resetColors(display.ha(1)); + plot(display.ha(1),dTime,y_s_on','.','MarkerSize',9,'Tag','yPoint'); + end + + %for i=1:numel(p_l) + % set(p_mi(i),'MarkerEdgeColor',get(p_l(i),'Color')) + %end + + + % predictive density vy_s= vy(s_out,:); + resetColors(display.ha(1)); [~,p_vr,p_vl] = plotUncertainTimeSeries(gx(s_out,dTime),vy_s(:,dTime),dTime,display.ha(1)); - for i=1:numel(p_l) - set(p_mi(i),'MarkerEdgeColor',get(p_l(i),'Color')) - set(p_vl(i),'Color',get(p_l(i),'Color')) - set(p_vr(i),'FaceColor',get(p_l(i),'Color')) - end + + +% for i=1:numel(p_l) +% try +% set(p_vl(i),'Color',get(p_l(i),'Color')) +% set(p_vr(i),'FaceColor',get(p_l(i),'Color')) +% +% catch +% set(p_vl(i),'FaceColor',get(p_l(i),'Color')) +% set(p_vr(i),'Color',get(p_l(i),'Color')) +% end +% end else - imagesc(gx(s_out,:),'Parent',display.ha(1)); - set(display.ha(1),'Clim',[0 1]) ; - colormap(flipud(colormap('bone'))); - plot(display.ha(1),multi2num(y_s_on),'.r'); + p_im = findobj(display.ha(1),'Tag','Ypred'); + if isempty (p_im) + imagesc(gx(s_out,:),'Parent',display.ha(1),'Tag','yPred'); + set(display.ha(1),'Clim',[0 1]) ; + colormap(flipud(colormap('bone'))); + plot(display.ha(1),VBA_indicator(y_s_on, [], true),'.r','Tag','yPoint'); + else + set(p_im, 'CData', gx(s_out,:)); + p_p = findobj(display.ha(1),'Tag','yPoint'); + set(p_p,'YData',VBA_indicator(y_s_on, [], true)); + end end - + % update top-right subplot: predicted VS observed data - cla(display.ha(2)) % plot identity if options.sources(currentSource).type==0 miy = min(min([gx(s_out,:);y(s_out,:)])); may = max(max([gx(s_out,:);y(s_out,:)])); - plot(display.ha(2),[miy,may],[miy,may],'k:') else - plot(display.ha(2),[0,1],[0,1],'k:') + miy = 0; + may = 1; end + hr = findobj(display.ha(2),'Tag','refline'); + if isempty (hr) + 
plot(display.ha(2),[miy,may],[miy,may],'k:','Tag','refline') + else + set(hr,'XData',[miy,may],'YData',[miy,may]); + end + if options.sources(currentSource).type==0 gx_src = gx(s_out,:) ; y_src = y(s_out,:) ; @@ -298,16 +389,46 @@ function update_observation_plot() gxin(~~options.isYout(s_out,:)) = nan; yin = y_src; yin(~~options.isYout(s_out,:)) = nan; - plot(display.ha(2),gxout(:),yout(:),'.','MarkerEdgeColor',[.7 .7 .7],'MarkerSize',9) - plot(display.ha(2),gxin',yin','.','MarkerSize',9) + pOut = findobj(display.ha(2),'Tag','yOut'); + if ~ isempty(pOut) + set(pOut,'XData',gxout(:),'YData',yout(:)); + else + plot(display.ha(2),gxout(:),yout(:),'.','MarkerEdgeColor',[.7 .7 .7],'MarkerSize',9,'Tag','yOut'); + end + pIn = findobj(display.ha(2),'Tag','yIn'); + if ~ isempty(pIn) + for i=1:numel(s_out) + set(pIn(i),'XData',gxin(i,:),'YData',yin(i,:)); + end + else + resetColors(display.ha(2)); + plot(display.ha(2),gxin',yin','.','MarkerSize',9,'Tag','yIn'); + end else + pIn = findobj(display.ha(2),'Tag','yIn'); + pOut = findobj(display.ha(2),'Tag','yOut'); + + if isempty(pIn) + gridp = 0:1e-2:1; plot(display.ha(2),gridp,gridp+sqrt(gridp.*(1-gridp)),'r--') plot(display.ha(2),gridp,gridp-sqrt(gridp.*(1-gridp)),'r--') - errorbar(gridgout{currentSource},stackyout{currentSource},stdyout{currentSource},'.','Color',[.7 .7 .7],'MarkerSize',9,'parent',display.ha(2)) - errorbar(gridgin{currentSource},stackyin{currentSource},stdyin{currentSource},'.','MarkerSize',9,'parent',display.ha(2)) - end - + errorbar(gridgout{currentSource},stackyout{currentSource},stdyout{currentSource},'.','Color',[.7 .7 .7],'MarkerSize',9,'parent',display.ha(2),'Tag','yOut') + errorbar(gridgin{currentSource},stackyin{currentSource},stdyin{currentSource},'.','MarkerSize',9,'parent',display.ha(2),'Tag','yIn') + else + set(pIn, ... + 'XData', gridgin{currentSource}, ... + 'YData', stackyin{currentSource}, ... + 'LData', stdyin{currentSource}, ... + 'UData', stdyin{currentSource}); + set(pOut, ... + 'XData', gridgout{currentSource}, ... + 'YData', stackyout{currentSource}, ... + 'LData', stdyout{currentSource}, ... 
+ 'UData', stdyout{currentSource}); + end + + end end function [] = displayDF(F,display) @@ -323,5 +444,10 @@ function update_observation_plot() end end -end + function resetColors (h) + if ~ verLessThan('matlab','8.4') + set(h,'ColorOrderIndex',1); + end + end +end diff --git a/subfunctions/displayResults.m b/core/display/displayResults.m similarity index 85% rename from subfunctions/displayResults.m rename to core/display/displayResults.m index 7067f7f8..7fdaf6b7 100644 --- a/subfunctions/displayResults.m +++ b/core/display/displayResults.m @@ -10,8 +10,10 @@ set(hres,'name','Simulation results','position',[pos(1),pos(2)-pos(4),pos(3),2*pos(4)],'color',ones(1,3),'menubar','none'); % parameters -if ~isempty(theta) hs = subplot(2,2,1,'parent',hres); + if isempty(theta) + placeHolder(hs,'no evolution parameters') + else xtick = 1:out.dim.n_theta; set(hs,'xtick',xtick,'nextplot','add','xlim',[.2,out.dim.n_theta+.8]) if ~out.options.OnLine @@ -24,9 +26,12 @@ plotUncertainTimeSeries(muTheta,V,[],hs); VBA_title(hs,'theta') plot(hs,theta,'go') -end -if ~isempty(phi) - hs = subplot(2,2,2,'parent',hres); + end + +hs = subplot(2,2,2,'parent',hres); +if isempty(phi) + placeHolder(hs,'no observation parameters') +else xtick = 1:out.dim.n_phi; set(hs,'xtick',xtick,'nextplot','add','xlim',[.2,out.dim.n_phi+.8]) if ~out.options.OnLine @@ -42,8 +47,11 @@ end % hyperparameters +hs = subplot(2,2,3,'parent',hres); n_gs = sum([out.options.sources(:).type]==0); -if n_gs>0 %~out.options.binomial +if n_gs == 0 + placeHolder(hs,'no observation hyperparameters') +else sigmaHat = posterior.a_sigma./posterior.b_sigma; vs = posterior.a_sigma./(posterior.b_sigma.^2); lvs = log(sigmaHat+sqrt(vs)) - log(sigmaHat); @@ -64,7 +72,6 @@ lab={}; end for i=1:n_gs, lab{end+1} = ['sigma' num2str(i)]; end - hs = subplot(2,2,3,'parent',hres); set(hs,'nextplot','add') plotUncertainTimeSeries(lm,lv.^2,[],hs); plot(hs,log([alpha;sigma(:)]),'go') @@ -142,7 +149,7 @@ hs = subplot(2,2,4,'parent',hres2); set(hs,'nextplot','add') plot(hs,[miy,may],[miy,may],'r') -if ~out.options.binomial +if n_gs>0 plot(hs,out.suffStat.gx(:),y(:),'.') else [stacky,stdy,gridg] = VBA_Bin2Cont(out.suffStat.gx,y); @@ -153,5 +160,18 @@ grid(hs,'on') axis(hs,'tight') -try getSubplots,end +try, VBA_getSubplots (); end + +end +% display low key text when usual plot is not required + function placeHolder(h,label) + xx = get(h,'XLim'); + yy = get(h,'YLim'); + t=text(mean(xx),mean(yy),label,'parent',h); + set(t, ... + 'HorizontalAlignment','center' , ... + 'FontSize' ,10 , ... 
+ 'Color' ,[.6 .6 .6] ); + set(h,'Visible','off'); + end diff --git a/subfunctions/displaySimulations.m b/core/display/displaySimulations.m similarity index 97% rename from subfunctions/displaySimulations.m rename to core/display/displaySimulations.m index 2b65f4b6..ded09be7 100644 --- a/subfunctions/displaySimulations.m +++ b/core/display/displaySimulations.m @@ -1,7 +1,7 @@ function [hf] = displaySimulations(y,x,eta,e) % plots simulated time series (including state-space SVD projections) -if isweird({y,x,eta,e}) +if VBA_isWeird ({y, x, eta, e}) hf = []; return end @@ -97,5 +97,5 @@ VBA_title(ha,'measurement noise') xlabel(ha,'time') ylabel(ha,'e') -getSubplots +VBA_getSubplots (); diff --git a/stats&plots/getColors.m b/core/display/getColors.m similarity index 100% rename from stats&plots/getColors.m rename to core/display/getColors.m diff --git a/stats&plots/getPanel.m b/core/display/getPanel.m similarity index 100% rename from stats&plots/getPanel.m rename to core/display/getPanel.m diff --git a/stats&plots/plotDensity.m b/core/display/plotDensity.m similarity index 95% rename from stats&plots/plotDensity.m rename to core/display/plotDensity.m index af29ef4d..63ce6a4e 100644 --- a/stats&plots/plotDensity.m +++ b/core/display/plotDensity.m @@ -17,7 +17,7 @@ % - gX: the npXdim.n 2D array giving the grid used for forming the MCMC % empirical histograms on each dimension of the hidden states % - pX/gY: [id, but for observed data] -% See also: get_MCMC_predictiveDensity.m +% See also: VBA_MCMC_predictiveDensity.m % fill in option structure try @@ -36,7 +36,7 @@ end % get deterministic trajectory -[y,x] = simulateNLSS(... +[y,x] = VBA_simulate(... n_t,... f_fname,... g_fname,... @@ -92,10 +92,17 @@ ' - hidden states: n=',num2str(dim.n),'\n ',... ' - evolution parameters: n_theta=',num2str(dim.n_theta),'\n ',... ' - observation parameters: n_phi=',num2str(dim.n_phi),'\n ']); -if options.binomial - tmp = ' (binomial data)'; +if numel(options.sources) > 1 + tmp = ' (multisources)'; else - tmp = []; + switch options.sources.type + case 0 + tmp = ' (gaussian data)'; + case 1 + tmp = ' (binomial data)'; + case 2 + tmp = ' (multinomial data)'; + end end if dim.n >= 1 if isinf(options.priors.a_alpha) ... 
diff --git a/stats&plots/plotElipse.m b/core/display/plotElipse.m similarity index 100% rename from stats&plots/plotElipse.m rename to core/display/plotElipse.m diff --git a/stats&plots/plotGraph3D.m b/core/display/plotGraph3D.m similarity index 98% rename from stats&plots/plotGraph3D.m rename to core/display/plotGraph3D.m index 620c37c5..b91f62c2 100644 --- a/stats&plots/plotGraph3D.m +++ b/core/display/plotGraph3D.m @@ -77,5 +77,5 @@ ylabel('value') zlabel('probability density') colormap(flipud(autumn)) -try; getSubplots; end +try, VBA_getSubplots (); end diff --git a/stats&plots/plotUncertainTimeSeries.m b/core/display/plotUncertainTimeSeries.m similarity index 64% rename from stats&plots/plotUncertainTimeSeries.m rename to core/display/plotUncertainTimeSeries.m index 6456044e..c86fe49e 100644 --- a/stats&plots/plotUncertainTimeSeries.m +++ b/core/display/plotUncertainTimeSeries.m @@ -45,14 +45,9 @@ if sum(SX(:)) ~= 0 noButton = 0; end - % Preset axes limits - if indEnd > 1 - set(haf,'xlim',[dTime(1),dTime(end)],'nextplot','add'); - else - set(haf,'xlim',[0.2,n+0.8],'xtick',1:n,'nextplot','add'); - end + % Preset axes limits end - + % Get display indices if ~exist('ind','var') || isempty(ind) ind = [1:n]; @@ -62,32 +57,74 @@ sc = 1; % Display uncertain time series if indEnd > 1 - % Plot first moment - hp = plot(haf,dTime,muX(ind,1:indEnd)'); - % Add confidence intervals - %if sum(SX(:)) ~= 0 + + hp = findobj(haf,'Tag','putLine'); + if isempty (hp) + % Plot first moment + hp = plot(haf,dTime,muX(ind,1:indEnd)','Tag','putLine'); set(haf,'nextplot','add') - for i = 1:n + else + for i = 1 : n + set(hp(i), ... + 'XData', dTime, ... + 'YData', muX(ind(i),1:indEnd)'); + end + end + % Add confidence intervals + hf = findobj(haf,'Tag','putFill'); + if isempty(hf) + for i = 1 : n yp = [muX(ind(i),1:indEnd)+sc*sqrt(SX(ind(i),1:indEnd)),fliplr(muX(ind(i),1:indEnd)-sc*sqrt(SX(ind(i),1:indEnd)))]; + yp(isnan(yp)) = VBA_nanmean(yp); xp = [dTime,fliplr(dTime)]; - col = get(hp(i),'color'); - hf(i) = fill(xp,yp,'r','parent',haf,'facecolor',col,'edgealpha',0,'facealpha',0.25); + col = get(hp(i),'color'); + hf(i) = fill(xp,yp,'r','parent',haf,'facecolor',col,'edgealpha',0,'facealpha',0.25,'Tag','putFill'); end + else + for i = 1 : n + yp = [muX(ind(i),1:indEnd)+sc*sqrt(SX(ind(i),1:indEnd)),fliplr(muX(ind(i),1:indEnd)-sc*sqrt(SX(ind(i),1:indEnd)))]; + yp(isnan(yp)) = VBA_nanmean(yp); + xp = [dTime,fliplr(dTime)]; + %hf(i) = fill(xp,yp,'r','parent',haf,'facecolor',col,'edgealpha',0,'facealpha',0.25); + set(hf(i), 'Vertices', [xp', yp']); + end + end %end set(haf,'ygrid','on') axis(haf,'tight') + set(haf,'xlim',[dTime(1),dTime(end)],'nextplot','add'); if ~isempty(color) set(hp,'color',color) set(hf,'FaceColor',color) end + else - hp = bar(dTime:dTime+n-1,muX(ind),'facecolor',[.8 .8 .8],'parent',haf); - set(haf,'nextplot','add') - hf = errorbar(dTime:dTime+n-1,muX(ind),sc*sqrt(SX(ind)),'r.','parent',haf); + + hp = findobj(haf,'Tag','putBar'); + if isempty (hp) + hp = bar(dTime:dTime+n-1,muX(ind),'facecolor',[.8 .8 .8],'parent',haf,'Tag','putBar'); + set(haf,'nextplot','add') + else + set(hp, ... + 'XData', dTime:dTime+n-1, ... + 'YData', muX(ind)); + end + + hf = findobj(haf,'Tag','putError'); + if isempty (hf) + hf = errorbar(dTime:dTime+n-1,muX(ind),sc*sqrt(SX(ind)),'r.','parent',haf, 'Tag','putError'); + else + set(hf, ... + 'XData', dTime:dTime+n-1, ... + 'YData', muX(ind), ... + 'LData', full(sc*sqrt(SX(ind))), ... 
+ 'UData', full(sc*sqrt(SX(ind)))); + end if ~isempty(color) set(hp,'FaceColor',color) set(hf,'color',color) end + set(haf,'xlim',[0.2,n+0.8],'xtick',1:n,'nextplot','add'); end % Add confidence intervals scaling control diff --git a/stats&plots/plotVolterra.m b/core/display/plotVolterra.m similarity index 100% rename from stats&plots/plotVolterra.m rename to core/display/plotVolterra.m diff --git a/stats&plots/rotateXLabels.m b/core/display/rotateXLabels.m similarity index 100% rename from stats&plots/rotateXLabels.m rename to core/display/rotateXLabels.m diff --git a/getStateParamInput.m b/core/sugar/getStateParamInput.m similarity index 100% rename from getStateParamInput.m rename to core/sugar/getStateParamInput.m diff --git a/subfunctions/priorPrettifyer.m b/core/sugar/priorPrettifyer.m similarity index 100% rename from subfunctions/priorPrettifyer.m rename to core/sugar/priorPrettifyer.m diff --git a/subfunctions/priorUglyfier.m b/core/sugar/priorUglyfier.m similarity index 100% rename from subfunctions/priorUglyfier.m rename to core/sugar/priorUglyfier.m diff --git a/setInput.m b/core/sugar/setInput.m similarity index 100% rename from setInput.m rename to core/sugar/setInput.m diff --git a/setPriors.m b/core/sugar/setPriors.m similarity index 100% rename from setPriors.m rename to core/sugar/setPriors.m diff --git a/VBA_FreeEnergy_UNL.m b/core/unormalizedLikelihood/VBA_FreeEnergy_UNL.m similarity index 98% rename from VBA_FreeEnergy_UNL.m rename to core/unormalizedLikelihood/VBA_FreeEnergy_UNL.m index 74297578..a9eaa086 100644 --- a/VBA_FreeEnergy_UNL.m +++ b/core/unormalizedLikelihood/VBA_FreeEnergy_UNL.m @@ -35,7 +35,7 @@ if ~isempty(indIn) ntot = length(indIn); Q = priors.SigmaPhi(indIn,indIn); - iQ = VBA_inv(Q,[]); + iQ = VBA_inv(Q); SSE = suffStat.dphi(indIn)'*iQ*suffStat.dphi(indIn); ldQ = - VBA_logDet(Q,[]); S = suffStat.Sphi - 0.5*length(indIn); diff --git a/VBA_UNLtemp.m b/core/unormalizedLikelihood/VBA_UNLtemp.m similarity index 97% rename from VBA_UNLtemp.m rename to core/unormalizedLikelihood/VBA_UNLtemp.m index df2aa54b..031b0f52 100644 --- a/VBA_UNLtemp.m +++ b/core/unormalizedLikelihood/VBA_UNLtemp.m @@ -37,7 +37,7 @@ s2 = s2 + Zi; % Accelerate divergent update - if isweird({EUi,Ui,Zi,d2Uidx2,d2UidP2}) + if VBA_isWeird({EUi,Ui,Zi,d2Uidx2,d2UidP2}) div = 1; break end diff --git a/VBA_evalAL.m b/core/unormalizedLikelihood/VBA_evalAL.m similarity index 92% rename from VBA_evalAL.m rename to core/unormalizedLikelihood/VBA_evalAL.m index bc43e986..9a748d0e 100644 --- a/VBA_evalAL.m +++ b/core/unormalizedLikelihood/VBA_evalAL.m @@ -122,9 +122,9 @@ dUdx = zeros(1,dim.n); else if deriv(3) - dUdx = numericDiff(g_fname,1,Xt,P,ut,yt,in); + dUdx = VBA_numericDiff(g_fname,1,Xt,P,ut,yt,in); else - [d2Udx2,dUdx] = numericDiff(@numericDiff,3,g_fname,1,Xt,P,ut,yt,in); + [d2Udx2,dUdx] = VBA_numericDiff(@numericDiff,3,g_fname,1,Xt,P,ut,yt,in); deriv(3) = 1; end end @@ -133,7 +133,7 @@ if dim.n==0 d2Udx2 = zeros(dim.n,dim.n); else - d2Udx2 = numericDiff(@getDU,3,g_fname,2,Xt,P,ut,yt,in); + d2Udx2 = VBA_numericDiff(@getDU,3,g_fname,2,Xt,P,ut,yt,in); end end if ~deriv(2) @@ -143,7 +143,7 @@ if deriv(4) dUdp = numericDiff(g_fname,2,Xt,P,ut,yt,in); else - [d2Udp2,dUdp] = numericDiff(@numericDiff,4,g_fname,2,Xt,P,ut,yt,in); + [d2Udp2,dUdp] = VBA_numericDiff(@numericDiff,4,g_fname,2,Xt,P,ut,yt,in); deriv(4) = 1; end end @@ -152,7 +152,7 @@ if dim.n_phi==0 d2Udx2 = zeros(dim.n_phi,dim.n_phi); else - d2Udp2 = numericDiff(@getDU,4,g_fname,3,Xt,P,ut,yt,in); + d2Udp2 = 
VBA_numericDiff(@getDU,4,g_fname,3,Xt,P,ut,yt,in); end end diff --git a/VBA_evalAL2.m b/core/unormalizedLikelihood/VBA_evalAL2.m similarity index 88% rename from VBA_evalAL2.m rename to core/unormalizedLikelihood/VBA_evalAL2.m index b757ec3c..207d0fe1 100644 --- a/VBA_evalAL2.m +++ b/core/unormalizedLikelihood/VBA_evalAL2.m @@ -100,9 +100,9 @@ dUdx = zeros(1,dim.n); else if deriv(3) - dUdx = numericDiff(g_fname,1,Xt,P,ut,yt,in); + dUdx = VBA_numericDiff(g_fname,1,Xt,P,ut,yt,in); else - [d2Udx2,dUdx] = numericDiff(@numericDiff,3,g_fname,1,Xt,P,ut,yt,in); + [d2Udx2,dUdx] = VBA_numericDiff(@numericDiff,3,g_fname,1,Xt,P,ut,yt,in); deriv(3) = 1; end end @@ -111,7 +111,7 @@ if dim.n==0 d2Udx2 = zeros(dim.n,dim.n); else - d2Udx2 = numericDiff(@getDU,3,g_fname,2,Xt,P,ut,yt,in); + d2Udx2 = VBA_numericDiff(@getDU,3,g_fname,2,Xt,P,ut,yt,in); end end if ~deriv(2) @@ -119,9 +119,9 @@ dUdx = zeros(1,dim.n); else if deriv(4) - dUdp = numericDiff(g_fname,2,Xt,P,ut,yt,in); + dUdp = VBA_numericDiff(g_fname,2,Xt,P,ut,yt,in); else - [d2Udp2,dUdp] = numericDiff(@numericDiff,4,g_fname,2,Xt,P,ut,yt,in); + [d2Udp2,dUdp] = VBA_numericDiff(@numericDiff,4,g_fname,2,Xt,P,ut,yt,in); deriv(4) = 1; end end @@ -130,7 +130,7 @@ if dim.n_phi==0 d2Udx2 = zeros(dim.n_phi,dim.n_phi); else - d2Udp2 = numericDiff(@getDU,4,g_fname,3,Xt,P,ut,yt,in); + d2Udp2 = VBA_numericDiff(@getDU,4,g_fname,3,Xt,P,ut,yt,in); end end diff --git a/demos/0_basics/demo_dynamicalSystem.m b/demos/0_basics/demo_dynamicalSystem.m new file mode 100755 index 00000000..9146af6a --- /dev/null +++ b/demos/0_basics/demo_dynamicalSystem.m @@ -0,0 +1,220 @@ + +function [posterior, out] = demo_dynamicalSystem () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_dynamicalSystem () +% Demo of dynamical system simulation and inversion +% +% This demo provides a simple example of the simulation and the estimation +% for a simple dynamical system. +% As a comparison, this demo also implement a grid search that can be +% compared to the variational result. 
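For orientation: the demo below relies on the toolbox function f_alpha, whose code is not part of this patch. A minimal sketch of what such an Euler-discretized, two-state evolution function typically looks like is given here (hypothetical f_convolution, NOT the actual f_alpha; it only illustrates how options.inF.dt and the two evolution parameters enter the state update):

    function fx = f_convolution (x, theta, u, in)
        % x(1): response, x(2): its rate of change
        % theta(1): input gain, theta(2): self-inhibition
        xdot = [x(2); theta(1) * u - theta(2) * x(2) - x(1)];
        % forward Euler integration over one time step of length in.dt
        fx = x + in.dt * xdot;
    end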
+%
+% /////////////////////////////////////////////////////////////////////////
+
+%% Define the model
+% =========================================================================
+
+% Description of the dynamics
+% -------------------------------------------------------------------------
+% evolution is a parametric convolution of the input with one hidden state
+f_fname = @f_alpha;
+% - define size of a time step
+dt = 1e-1; % in sec
+% - store in the structure that will be passed to the evolution function
+options.inF.dt = dt;
+
+% observations are simply noisy mappings of the states (y = x + noise)
+g_fname = @g_Id;
+% however only the first state is observable
+options.inG.ind = 1;
+
+% Dimensions of the model
+% -------------------------------------------------------------------------
+% evolution parameters
+dim.n_theta = 2;
+% observation parameters
+dim.n_phi = 0;
+% number of states
+dim.n = 2;
+
+%% Simulate data
+% =========================================================================
+
+% Define the design, i.e. the inputs to be convolved
+% -------------------------------------------------------------------------
+% number of observations
+n_t = 1e2 / dt;
+% baseline
+u = zeros (1, n_t);
+% random pulses
+nPulses = 16;
+u(randperm (n_t, nPulses)) = 1;
+
+% Parameters of the model to be simulated
+% -------------------------------------------------------------------------
+% evolution parameters
+theta = [1.1 / dt; 0.1];
+% observation parameters
+phi = [];
+% initial state
+x0 = [0; 0];
+
+% state noise precision
+alpha = Inf; % deterministic
+% observation precision
+sigma = 1e2; % high precision, i.e. low observation noise
+
+% Simulate
+% -------------------------------------------------------------------------
+[y, x] = VBA_simulate (n_t, f_fname, g_fname, theta, phi, u, alpha, sigma, options, x0);
+
+% Display
+% -------------------------------------------------------------------------
+plotSimulation (u, x, y, n_t, dt);
+
+%% Perform model estimation, the VBA way
+% =========================================================================
+fprintf('Variational estimation... ');
+tic;
+
+% call VBA inversion routine
+% -------------------------------------------------------------------------
+options.verbose = false;
+[posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options);
+
+% find best parameter
+% -------------------------------------------------------------------------
+theta_VB = posterior.muTheta;
+
+% chrono
+% -------------------------------------------------------------------------
+fprintf('done! (took %f seconds)\n', toc);
+
+%% Perform model estimation, the old way
+% =========================================================================
+% for the sake of the example, we'll now try to recover the parameters from
+% the data using a grid search over parameters and look for the set that
+% maximizes the likelihood
+fprintf('Performing grid search... 
'); +tic; + +% define grid for parameter search +% ------------------------------------------------------------------------- +N = 9; +g_theta1 = 1 / dt + linspace (- 4 , 4, N); +g_theta2 = linspace (- 1, 1, N); + +% Compute predictions and likelihoodfor each parameter set +% ------------------------------------------------------------------------- +options.verbose = false; +for i1 = 1 : numel (g_theta1) + for i2 = 1 : numel (g_theta2) + theta_grid = [g_theta1(i1); g_theta2(i2)]; + sigma_test = Inf; + g{i1, i2} = VBA_simulate (n_t, f_fname, g_fname, theta_grid, phi, u, alpha,sigma_test, options, x0); + LL(i1, i2) = - sum ( (y - g{i1, i2}) .^ 2); + end +end + +% find best parameter +% ------------------------------------------------------------------------- +[~, i1_ML, i2_ML] = VBA_maxMat(LL); +theta_ML = [g_theta1(i1_ML); g_theta2(i2_ML)]; + +% Display results +% ------------------------------------------------------------------------- +plotGridSearch (y, g, g_theta1, g_theta2, LL, n_t, dt); + +% chrono +% ------------------------------------------------------------------------- +fprintf('done! (took %f seconds)\n', toc); + +%% Show results +% ========================================================================= +fprintf('Results of the estimation:\n'); +disp (table (... + [theta(1); theta_ML(1); theta_VB(1)], ... + [theta(2); theta_ML(2); theta_VB(2)], ... + 'RowNames',{'trueValue','ML','VB'}, ... + 'VariableNames',{'theta_1','theta_2'})); + +end + +%% ######################################################################## +% Display functions +% ######################################################################## +function plotSimulation (u, x, y, n_t, dt) + hf = figure ('color', 'w', 'Name', 'demo_dynamicalSystem: simulation'); + + % x + timeline = (0 : n_t - 1) * dt; + + % plot inputs + ha(1) = subplot (3, 1, 1, 'parent', hf); + plot (ha(1), timeline, u); + xlabel (ha(1), 'time (sec)'); + ylabel (ha(1), 'u(t)'); + title (ha(1), 'input'); + + % plot state trajectory + ha(2) = subplot (3, 1, 2, 'parent', hf); + plot (ha(2), timeline, x); + xlabel (ha(2), 'time (sec)'); + ylabel (ha(2), 'x(t)'); + title (ha(2), 'state'); + + % plot observations + ha(3) = subplot (3, 1, 3, 'parent', hf); + plot (ha(3), timeline, y, '.'); + xlabel (ha(3), 'time (sec)'); + ylabel (ha(3), 'y(t)'); + title (ha(3), 'observation'); + + % make pretty + set (ha, 'box', 'off', 'ygrid', 'on', 'ylim', [- 0.2, 1.2]); +end + +function plotGridSearch (y, g, g_theta1, g_theta2, LL, n_t, dt) + hf = figure ('color', 'w', 'Name', 'demo_DynamicalSystem: grid search'); + + % x + timeline = (0 : n_t - 1) * dt; + + % Likelihood + eLL = exp(LL/100); + hb(1) = subplot (1, 2, 2, 'parent', hf); + imagesc(eLL, 'Parent', hb(1)); + colorLims = [-0.15 1.1*VBA_maxMat(eLL)]; + set(hb(1),'XTick',1 : numel(g_theta2), 'XTickLabel', g_theta2); + set(hb(1),'YTick',1 : numel(g_theta1), 'YTickLabel', g_theta1); + set(hb(1),'Clim',colorLims); + colorbar + title (hb(1), 'Likelihood'); + xlabel (hb(1), 'theta_2') + ylabel (hb(1), 'theta_1') + + % predictions about theta + hb(2) = subplot (1, 2, 1, 'parent', hf); + xlabel (hb(2), 'time (sec)'); + ylabel (hb(2), 'observations / predictions'); + title (hb(2), 'trajectories'); + hold on; + + % make pretty + set (hb(2), 'box', 'off', 'ygrid', 'on'); + + % color predictions as function of likelihood + xLL = linspace(colorLims(1),colorLims(2), 64); + cLL = colormap(flipud(colormap('hot'))); + for i1 = 1 : numel (g_theta1) + for i2 = 1 : numel (g_theta2) + l = plot(hb(2),timeline,g{i1,i2}); + 
%l(2) = plot(hb(3),timeline,g{i1,i2}(2,:)); + set(l,'Color',interp1(xLL, cLL, eLL(i1,i2))); + end + end + + % add data + plot (hb(2), timeline, y, '.', 'Color', [.6 .6 .6], 'MarkerSize', 6); +end diff --git a/demos/0_basics/demo_excludeData.m b/demos/0_basics/demo_excludeData.m new file mode 100644 index 00000000..4d376679 --- /dev/null +++ b/demos/0_basics/demo_excludeData.m @@ -0,0 +1,83 @@ +function demo_excludeData () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_excludeData () +% Demo of simulation and inversion of a dynamical system that is sampled on +% an irregular grid (not all timepoints are observed) +% +% More generally, this demo shows how to deal with data exclusion +% +% ///////////////////////////////////////////////////////////////////////// + +% number of simulated points +N = 257; + +%% Specify the model +% ========================================================================= +% For the sake of the demonstration, we use a simple linear dynamical +% system $ dx/dt = Ax + phi' u $ that is directly observed $ y = x + noise $ + +% evolution function +% ------------------------------------------------------------------------- +options.inF.A = [- 4, - 16; 4, - 4]; +options.inF.dt = 1e-1; + +function fx = f_evolution(x,P,u,in) + xdot = in.A * x + diag(P) * u; + fx = x + in.dt * xdot; +end + +f_fname = @f_evolution; + +% observation function +% ------------------------------------------------------------------------- +g_fname = @g_Id; + +%% Simulate data +% ========================================================================= + +% inputs +u = randn(2, N); + +% parameters +x0 = [0; 0]; % initial state +theta = [1; 2]; % effect of inputs +phi = []; % no observation parameters +alpha = Inf; % deterministic +sigma = 1; % observation noise + +% Build full time series of hidden states and observations +[y,x,x0,eta,e] = VBA_simulate (N,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); + +% display full time series of hidden states and observations +displaySimulations(y,x,eta,e); + +%% Decimation +% ========================================================================= +% here we will simulate a sampling of data on an irregular grid by +% 'removing' data from the full simulation + +% sampling grid (exponential timestps) +timepoints = 2 .^ (1 : floor (log (N) ./ log (2))); + +% exclude all data but timepoints: +% if options.isYout = 1, the datapoint is ignored +options.isYout = ones (size (y)); +options.isYout(:, timepoints) = 0; + +%% Inversion +% ========================================================================= + +% dimensions of the problem +dim.n_theta = 2; +dim.n_phi = 0; +dim.n = 2; +dim.p = 2; + +% Invert deterministic model +[posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); + +% display +displayResults(posterior,out,y-e,x,x0,theta,phi,alpha,sigma); + +end \ No newline at end of file diff --git a/demos/0_basics/demo_modelComparison.m b/demos/0_basics/demo_modelComparison.m new file mode 100755 index 00000000..5112769a --- /dev/null +++ b/demos/0_basics/demo_modelComparison.m @@ -0,0 +1,107 @@ +function demo_modelComparison () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% demo_modelComparison () +% demo of random-effect group-level bayesian model selection in the context +% of nested linear models. 
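As a reminder of the quantity manipulated below: with equal prior probabilities on the two models, the per-subject log evidences translate into a log Bayes factor and a posterior model probability as follows (logEvidence is a placeholder for one column of the logEvidence_y1 or logEvidence_y2 arrays computed in this demo):

    % log Bayes factor in favour of model 1 over model 2, for one subject
    logBF = logEvidence(1) - logEvidence(2);
    % posterior probability of model 1, assuming equal model priors
    pm1 = 1 / (1 + exp (- logBF));   % equivalently: p = VBA_softmax (logEvidence); pm1 = p(1)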
+% +% ///////////////////////////////////////////////////////////////////////// + +% definition of the observations and models +% ========================================================================= + +% dimensions +% ------------------------------------------------------------------------- +% number of subjects +nSubjects = 32; +% number of observatiosn per subject +nObs = 64; +% number of predictors (regressors) +nPred = 4; + +% parameters +% ------------------------------------------------------------------------- +% noise variance +sigma2 = 1e0; +% signal strength +signal = 1e0; + +% generate data +% ------------------------------------------------------------------------- +for i = 1 : nSubjects + + % full design matrix + X1 = randn (nObs, nPred); + % nested model + X2 = X1(:, 1 : 2); + + % random weights + b = sqrt (signal) * rand(nPred, 1); + + % simulate observations + y1 = X1 * b + sqrt (sigma2) * randn (nObs, 1); + y2 = X2 * b(1 : 2) + sqrt (sigma2) * randn (nObs, 1); + + % compute model evidence (frequentist limit) + logEvidence_y1(1, i) = lev_GLM (y1, X1); + logEvidence_y1(2, i) = lev_GLM (y1, X2); + + logEvidence_y2(1, i) = lev_GLM (y2, X1); + logEvidence_y2(2, i) = lev_GLM (y2, X2); + +end + +% display empirical histogram of log-Bayes factors +% ------------------------------------------------------------------------- +plotBayesFactor (logEvidence_y1, logEvidence_y2); + +% perform model selection with the VBA +% ========================================================================= +options.verbose = false; + +% perform group-BMS on data generated under the full model +[p1, o1] = VBA_groupBMC (logEvidence_y1, options); +set (o1.options.handles.hf, 'name', 'group BMS: y_1') + +fprintf('Statistics in favor of the true model (m1): pxp = %04.3f (Ef = %04.3f)\n', o1.pxp(1), o1.Ef(1)); + +% perform group-BMS on data generated under the nested model +[p2, o2] = VBA_groupBMC (logEvidence_y2, options); +set (o2.options.handles.hf, 'name', 'group BMS: y_2') + +fprintf('Statistics in favor of the true model (m2): pxp = %04.3f (Ef = %04.3f)\n', o2.pxp(2), o2.Ef(2)); + +% classical hypothesis testing +% ========================================================================= +% for the sake of the example, perform same analysis using an F-test + +% check if X1 better than X2 on y1 +c = [zeros(2); eye(2)]; +[pv,stat,df] = GLM_contrast (X1, y1, c, 'F', true); +set(gcf,'name','classical analysis of y1') + +fprintf('Statistics in favor of the full model (m1): p = %04.3f (F = %04.3f)\n', pv, stat, df(1), df(2)); + +% check if X1 better than X2 on y2 +[pv,stat,df] = GLM_contrast (X1, y2, c, 'F', true); +set(gcf,'name','classical analysis of y2') + +fprintf('Statistics in favor of the full model (m1): p = %04.3f (F = %04.3f)\n', pv, stat, df(1), df(2)); +% note that an absence of significance does not mean significant absence! 
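To make that last point concrete: Bayesian model comparison can quantify evidence in favour of the nested model, which the F-test cannot. A minimal sketch, reusing for a single subject i the log evidences computed above on data simulated under the nested model (equal model priors assumed):

    % posterior probability of the nested model m2 for subject i
    p = VBA_softmax (logEvidence_y2(:, i));
    fprintf ('posterior probability of m2 for subject %d: %4.3f\n', i, p(2));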
+
+end
+
+%% ########################################################################
+% display subfunctions
+% #########################################################################
+function plotBayesFactor (logEvidence_y1, logEvidence_y2)
+    [n1, x1] = VBA_empiricalDensity ((logEvidence_y1(1,:) - logEvidence_y1(2, :))');
+    [n2, x2] = VBA_empiricalDensity ((logEvidence_y2(1,:) - logEvidence_y2(2 ,:))');
+    hf = figure ('color' ,'w', 'name', 'demo_modelComparison: distribution of log Bayes factors');
+    ha = axes ('parent', hf,'nextplot','add');
+    plot (ha, x1, n1, 'color', 'r');
+    plot (ha, x2, n2, 'color', 'b');
+    legend (ha, {'true = model 1', 'true = model 2'});
+    xlabel (ha, 'log p(y|m1) - log p(y|m2)');
+    ylabel (ha, 'proportion of simulations');
+end
diff --git a/demos/0_basics/demo_multisession.m b/demos/0_basics/demo_multisession.m
new file mode 100644
index 00000000..ade13747
--- /dev/null
+++ b/demos/0_basics/demo_multisession.m
@@ -0,0 +1,103 @@
+function [posterior, out] = demo_multisession ()
+% // VBA toolbox //////////////////////////////////////////////////////////
+%
+% [posterior, out] = demo_multisession ()
+% Shows how to fit multiple sessions at once
+%
+% /////////////////////////////////////////////////////////////////////////
+
+%% Model definition
+% =========================================================================
+
+% evolution function
+function [fx] = f_demo_multisession (Xt, Theta, ut, ~)
+    % x(t+1) = x(t) + u(t) theta(1) + theta(2)
+    fx = Xt + [ut 1] * Theta;
+end
+
+function [gx] = g_demo_multisession (Xt, Phi, ~, ~)
+    % y(t) = x(t) + phi(1) + noise
+    gx = Phi + Xt;
+end
+
+
+%% Simulations
+% =========================================================================
+% To emulate multisession, we simulate the same model twice with different
+% sets of parameters
+
+% total number of observations
+n_t = 100;
+% inputs for the overall experiment
+u = rand (1, n_t);
+% no display
+options.verbose = false;
+
+% first session
+theta = [5; 1];
+phi = 5;
+X0 = 0;
+y1 = VBA_simulate (round(n_t/2),@f_demo_multisession,@g_demo_multisession,theta,phi,u(1:(n_t/2)),Inf,.01,options,X0);
+
+% second session
+theta = - theta; % reverse evolution
+phi = phi - 3; % shift in observations
+X0 = 0;
+y2 = VBA_simulate (round(n_t/2),@f_demo_multisession,@g_demo_multisession,theta,phi,u((1+n_t/2):end),Inf,.01,options,X0);
+
+% concatenate both sessions
+y = [y1 y2];
+
+%% Model inversion
+% =========================================================================
+
+% dimension of the problem
+dim.n_t = n_t; % number of observation timepoints
+dim.n_phi = 1; % nb of observation parameters
+dim.n_theta = 2; % nb of evolution parameters
+dim.n = 1; % nb of states
+
+% display options
+options.verbose = true;
+
+% fit data as 2 sessions
+% -------------------------------------------------------------------------
+% split into sessions
+options.multisession.split = [n_t/2 n_t/2]; % two sessions of 50 datapoints each
+
+% By default, all parameters are duplicated for each session. However, you
+% can fix some parameters so they remain constant across sessions.
+ +% + Example: same evolution parameter in both sessions +% options.multisession.fixed.theta = 1; % <~ uncomment for fixing theta(1) + +% + Example: same observation parameter in both sessions +% options.multisession.fixed.phi = 1; % <~ uncomment for fixing phi(1) + +% + Example: same initial state in both sessions +% options.multisession.fixed.X0 = 1; % <~ uncomment for fixing X0(1) + +% Model identification as usual +[posterior.split,out.split] = VBA_NLStateSpaceModel(y,u,@f_demo_multisession,@g_demo_multisession,dim,options); +set(out.split.options.hf, 'name','demo_multisession: duplicated parameters') + +% 1 session case +% ------------------------------------------------------------------------- +% fix all parameters to equal across sessions. Note that it would be +% simpler to just remove the multisession options! +options.multisession.fixed.theta = 'all'; % shrotcut for 1:dim.n_theta +options.multisession.fixed.phi = 'all'; % shrotcut for 1:dim.n_phi + +% Model identification as usual +[posterior.fixed,out.fixed] = VBA_NLStateSpaceModel(y,u,@f_demo_multisession,@g_demo_multisession,dim,options); +set(out.fixed.options.hf, 'name','demo_multisession: fixed parameters') + +end + + + + + + + + diff --git a/demos/0_basics/demo_sources.m b/demos/0_basics/demo_sources.m new file mode 100644 index 00000000..95446595 --- /dev/null +++ b/demos/0_basics/demo_sources.m @@ -0,0 +1,164 @@ +function [posterior, out, summary] = demo_sources () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_sources () +% Demo of observation distribution specification +% +% This demo shows how to simulate and invert models having different, and +% possibly multiple, data distributions +% +% Background +% ~~~~~~~~~~ +% +% Let's say you want to measure the ability of your subjects to detect a +% stimulus and you collect three types of observations: the (log-) reaction time +% (continuous, normally distributed), a button press (seen/unseen, binary), +% and a rating between 1 and 5 (very unlikely <-> very likely, +% multinomial). +% In this simplified example, we assume that all responses are an affine +% transformation of the stimulus intensity (u). We then map to the observation +% using ad hoc link function (logistic for button press, softmax for +% ratings) +% Although we can fit the respective modalities independently (first part of +% the demo), we can also use the hypothesis that the same generative model +% (ie the same set of parameters) are underlying all observations and +% therefore fit all data at once (second part of the demo) to get a better +% estimation. 
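In brief, the mixture of distributions is declared through the options.sources array, one entry per subset of rows of the data matrix, with type 0 for Gaussian, 1 for Bernoulli and 2 for multinomial data. The joint inversion in the second part of this demo builds exactly this kind of structure, e.g. (assuming y = [y1; y2; y3] stacks one Gaussian row, one binary row and five rating rows):

    options.sources(1).out  = 1;       % row of y holding the Gaussian data
    options.sources(1).type = 0;
    options.sources(2).out  = 2;       % row of y holding the binary (seen/unseen) responses
    options.sources(2).type = 1;
    options.sources(3).out  = 3 : 7;   % rows of y holding the rating (one row per level)
    options.sources(3).type = 2;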
+% +% ///////////////////////////////////////////////////////////////////////// + +%% Define the models +% ========================================================================= + +% Gaussian observations +% ------------------------------------------------------------------------- +function gx = g_source1 (~, phi, ut, ~) + % affine mapping + gx = - phi(1) * ut + phi(2); +end + +% Binary observations +% ------------------------------------------------------------------------- +function gx = g_source2 (~, phi, ut, ~) + % sigmoid mapping + gx = VBA_sigmoid(ut, 'slope', phi(1), 'center', phi(2)); +end + +% Multinomial observations +% ------------------------------------------------------------------------- +function gx = g_source3 (~, phi, ut, ~) + % softmax mapping + ratings = - 2 : 2; + gx = exp (- phi(1) * (ut - ratings - phi(2)) .^ 2); + gx = gx(:) / sum (gx); +end + +%% Simulate data +% ========================================================================= + +% Experimental design +% ------------------------------------------------------------------------- + +% generate experimental design +T = 50; +u = randn(1,T); + +% Choose basic settings for simulations +phi = [2; 1]; % observation parameters (scale and offset) + +% dimension of the models (here the same for all) +dim.n_phi = 2; % nb of observation parameters + + +% Simulations +% ------------------------------------------------------------------------- + +options.sources.type = 0; % flag for gaussian observations (default, can be omited) +sigma = 0.1; % precision of the gaussian noise +y1 = VBA_simulate (T,[],@g_source1,[],phi,u,Inf,sigma,options); + +options.sources.type = 1; % flag for Bernoulli observations +y2 = VBA_simulate (T,[],@g_source2,[],phi,u,Inf,[],options); + +options.sources.type = 2; % flag for multinomial observations +y3 = VBA_simulate (T,[],@g_source3,[],phi,u,Inf,[],options); + +% Inversion +% ------------------------------------------------------------------------- +% Here, we will assume that the paramters are NOT shared across observations. +% If we want to compare this to the hypothesis that the same parameters are +% generating the data, we can simply chain the inversion by carrying over +% the posterior as a prior for the next inversion. A better way is to +% invert all data at once (see below). + +options.sources.type = 0; +[posterior.s1, out.s1] = VBA_NLStateSpaceModel(y1,u,[],@g_source1,dim,options); + +% options.priors = posterior.s1; % <~ uncomment for the shared parameter hypothesis +options.sources.type = 1; +[posterior.s2, out.s2] = VBA_NLStateSpaceModel(y2,u,[],@g_source2,dim,options); + + +% options.priors = posterior.s2; % <~ uncomment for the shared parameter hypothesis +options.sources.type = 2; +[posterior.s3, out.s3] = VBA_NLStateSpaceModel(y3,u,[],@g_source3,dim,options); + + +%% Combine observations in a joint estimation +% ========================================================================= + +% concatenate observations +y = [y1; y2; y3]; + +% define joint observation function: as we need to predicts all lines of +% observations at once, we simply concatenate respective predictions the +% same way we concatenated y1 to y3. +% Note that in general, you don't have to necessarily forward all inputs +% and parameters to each observation subfunctions... +function gx = g_sourceAll (xt, phi, ut, in) + gx = cat(1, ... + g_source1 (xt, phi, ut, in), ... + g_source2 (xt, phi, ut, in), ... 
+ g_source3 (xt, phi, ut, in)); +end + +options = struct (); +% describe the type of distribution of the different lines of observations +options.sources(1).out = 1; % selecting y1 line in y +options.sources(1).type = 0; % flag for gaussian observations + +options.sources(2).out = 2; % selecting y2 line in y +options.sources(2).type = 1; % flag for bernoulli observations + +options.sources(3).out = 3 : 7; % selecting y3 lines in y +options.sources(3).type = 2; % flag for multinomial observations + +% note that we could generate new multisource data as following: +% y = VBA_simulate (T,[],@g_sourceAll,[],phi,u,Inf,[],options); + +% call inversion routine +[posterior.all, out.all] = VBA_NLStateSpaceModel(y,u,[],@g_sourceAll,dim,options); + +%% Display results +% ========================================================================= + +post2str = @ (post) arrayfun (@ (m, v) sprintf('%3.2f (%3.2f)', m, v), post.muPhi, diag(post.SigmaPhi), 'UniformOutput', false); + +summary = table ( ... + post2str (posterior.s1), ... + post2str (posterior.s2), ... + post2str (posterior.s3), ... + mean([posterior.s1.muPhi, posterior.s2.muPhi, posterior.s3.muPhi],2), ... + post2str (posterior.all), ... + phi, ... + 'RowNames', {'phi_1', 'phi_2'}, ... + 'VariableNames', {'y1','y2','y3','mean','joint','true'}); + +fprintf('Posterior parameter estimates:\n') +disp (summary); + +p = VBA_softmax ([(out.s1.F + out.s2.F + out.s3.F), out.all.F]); +fprintf ('\nPosterior probability of parameters being shared: %4.3f\n', p(2)); + + +end \ No newline at end of file diff --git a/demos/0_basics/demo_staticModel.m b/demos/0_basics/demo_staticModel.m new file mode 100644 index 00000000..e3f0cb35 --- /dev/null +++ b/demos/0_basics/demo_staticModel.m @@ -0,0 +1,79 @@ +function [posterior, out] = demo_staticModel () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_staticModel () +% Demo of simple linear Bayesian regression +% +% The problem is to fit observations with a linear combination +% of regressors. Formally, this can be written as: +% +% $$ y = X.\beta + \vareps $$ +% +% where $y$ is the set of observations, $X$ is the design matrix containing +% the regressors, $\beta$ is a vector of parameters defining the respective +% weight of the regressors, and $\vareps ~ N(0,\sigma^2)$ is some measurment +% noise. +% The objective is thus to estimate $\beta$ and $\sigma^2$ from the data +% given X. +% +% ///////////////////////////////////////////////////////////////////////// + +%% Specify the linear model +% ========================================================================= + +% number of observations +N = 100; + +% regressors +k = 1; % this is a simple regression. 
Set k > 1 for a multiple regression model +regressors = randn(k, N); + +% observation function that links inputs (regressors), parameters (beta +% weights) and observations +function [gx] = g_linearRegression (~, betas, regressors, ~) + % predictied observations + gx = betas' * regressors ; +end + +%% Simulate data +% ========================================================================= +% 'true' weights of the regressors +betas = rand(k,1) ; + +% 'true' observation noise +sigma2 = 5; + +% simulate data +% this applies g_linearRegression to all columns of the inputs (regressors) +% with the given parameters beta and sigma +[y] = VBA_simulate (N,[],@g_linearRegression,[],betas,regressors,Inf,sigma2,struct,[]); + + +%% Inversion +% ========================================================================= +% specify dimensions of the problem +% + number of observation parameters, ie. here nuber of betas +dim.n_phi = k; +% number of inputs, ie. here number of regressors +dim.u = k; +% + size of observations (number of predictions per input) +dim.p = 1; + +% call inversion routine +[posterior,out] = VBA_NLStateSpaceModel(y,regressors,[],@g_linearRegression,dim,struct); + +%% Results +% ========================================================================= +% estimated beta weights +fprintf('Estimated beta weights: '); +fprintf('%3.2f ', posterior.muPhi); +fprintf('\n'); + +% fit +fprintf('Percentage of explained variance (R2): %03.2f\n', out.fit.R2); + +% plot results vs. true parameter values +displayResults(posterior,out,y,[],[],[],betas,[],sigma2); + +end + diff --git a/subfunctions/demo_MCsampling.m b/demos/1_advanced/demo_MCsampling.m similarity index 90% rename from subfunctions/demo_MCsampling.m rename to demos/1_advanced/demo_MCsampling.m index 070debd9..8c1c0541 100644 --- a/subfunctions/demo_MCsampling.m +++ b/demos/1_advanced/demo_MCsampling.m @@ -17,8 +17,9 @@ % Build options structure for temporal integration of SDE -inG.G0 = 50; -inG.beta = 0.2; +inG.scale = 50; +inG.slope = 0.2; + inF.deltat = deltat; options.inF = inF; options.inG = inG; @@ -40,7 +41,7 @@ dim.n = 3; dim.p = dim.n; -[pX,gX,pY,gY,X,Y] = get_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,1e2); +[pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,1e2); [h] = plotDensity(f_fname,g_fname,u,n_t,options,dim,pX,gX,pY,gY); diff --git a/subfunctions/demo_MFX.m b/demos/1_advanced/demo_MFX.m similarity index 64% rename from subfunctions/demo_MFX.m rename to demos/1_advanced/demo_MFX.m index 155ea252..682eab24 100644 --- a/subfunctions/demo_MFX.m +++ b/demos/1_advanced/demo_MFX.m @@ -1,10 +1,6 @@ +function demo_MFX % this demo exemplifies the use of mixed-effects analysis in VBA -clear all -close all -clc -dbstop if error - ns = 8; % #subjects dim.n_phi = 2; dim.n = 1; @@ -29,22 +25,32 @@ options{i}.DisplayWin = 0; options{i}.verbose = 0; options{i}.dim = dim; - options{i}.binomial = 0; - [y{i}] = simulateNLSS(dim.n_t,f_fname,g_fname,theta(:,i),phi(:,i),u{i},Inf,Inf,options{i},x0(:,i)); + options{i}.sources.type = 0; + [y{i}] = VBA_simulate (dim.n_t,f_fname,g_fname,theta(:,i),phi(:,i),u{i},Inf,Inf,options{i},x0(:,i)); end - +% TO REMOVE: obsolete syntax ?? 
% priors_group.QPhi = 0.*eye(dim.n_phi); % priors_group.QTheta = 0.*eye(dim.n_theta); % priors_group.QX0 = 0.*eye(dim.n); % priors_group.QPhi(2,2) = 0; % ffx -% priors_group.SigmaPhi = eye(dim.n_phi); -% priors_group.SigmaPhi(2,2) = 0; % fix population mean to 0 -[p_sub,o_sub,p_group,o_group] = VBA_MFX(y,u,f_fname,g_fname,dim,options);%,priors_group); +priors_group.muPhi = ones(dim.n_phi,1); +priors_group.SigmaPhi = eye(dim.n_phi); + +% TEST CASES (to comment/uncomment) +% 1. fix population mean to 0 for phi(1) +priors_group.SigmaPhi(1,1) = 0; +% 2. fixed-effect over the population for phi(1) +priors_group.a_vPhi = ones(dim.n_phi,1); +priors_group.b_vPhi = ones(dim.n_phi,1); +priors_group.a_vPhi(1) = Inf; +priors_group.b_vPhi(1) = 0; + +[p_sub,o_sub,p_group,o_group] = VBA_MFX(y,u,f_fname,g_fname,dim,options,priors_group);%,priors_group); % extract within-subject parameter estimates for i=1:ns - % with MFX-type priors + % with MFX-type priors Theta(:,i,1) = p_sub{i}.muTheta; Phi(:,i,1) = p_sub{i}.muPhi; X0(:,i,1) = p_sub{i}.muX0; @@ -109,3 +115,43 @@ legend(ha,leg) end +end + +%% ######################################################################## +function [out] = addBestLinearPredictor(ho,verbose) +% fits a GLM on current graphical object (and adds the line on the graph) +try,ho;catch,ho=gco;end +try,verbose;catch,verbose=1;end +if ~isequal(get(ho,'type'),'line') + disp('addbestLinearPredictor: current graphical object is not a line plot!') + out = []; + return +end +out.gco = ho; +try, verbose; catch; verbose = 0; end +x = VBA_vec(get(ho,'xdata')); +n = length(x); +X = [x,ones(n,1)]; +y = VBA_vec(get(ho,'ydata')); +[pv,stat,df,out] = GLM_contrast(X,y,[1;0],'F',verbose); +if ~verbose + out.pv = pv; + out.stat = stat; + out.df = df; +end +try + set(out.handles.hf,'name','addbestLinearPredictor on current graphical object') +end +ha = get(ho,'parent'); +status = get(ha,'nextplot'); +xlim = get(ha,'xlim'); +ylim = get(ha,'ylim'); +set(ha,'nextplot','add'); +col = get(ho,'color'); +yhat = [VBA_vec(xlim),ones(2,1)]*out.b; +out.hp = plot(ha,xlim,yhat','color',col) +set(ha,'nextplot',status,'xlim',xlim,'ylim',ylim); + +end + + diff --git a/subfunctions/demo_RFX.m b/demos/1_advanced/demo_RFX.m similarity index 96% rename from subfunctions/demo_RFX.m rename to demos/1_advanced/demo_RFX.m index 7c1fc3fb..d6b72484 100644 --- a/subfunctions/demo_RFX.m +++ b/demos/1_advanced/demo_RFX.m @@ -61,7 +61,7 @@ options.inG.X = X1; for i=1:2 x0(1) = 2-i; - [y,x,x0,eta,e] = simulateNLSS(1,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); + [y,x,x0,eta,e] = VBA_simulate (1,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); % Invert model with and without 2nd-level effect for j=1:2 options.priors.SigmaX0(1,1) = 2-j; % group mean effect @@ -103,6 +103,6 @@ title(ha,'evidence for a group effect') box(ha,'off') -getSubplots +VBA_getSubplots (); diff --git a/demos/1_advanced/demo_VolterraKernels.m b/demos/1_advanced/demo_VolterraKernels.m new file mode 100644 index 00000000..90c4234f --- /dev/null +++ b/demos/1_advanced/demo_VolterraKernels.m @@ -0,0 +1,81 @@ +function [posterior, out] = demo_VolterraKernels () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% demo_VolterraKernels () +% demo of Volterra kernel estimation and use +% +% This code demonstrates how to extract Volterra kernels from a +% dynamical system trajectory. In this particular example, +% the dynamical system is in fact a Rescorla-Wagner learning agent. 
Here, +% we use Volterra kernel decomposition to evaluate the long term influence +% of an input (feedback) on the immediate and future responses. +% +% See also demo_dynLearningRate +% +% ///////////////////////////////////////////////////////////////////////// + +%% Global values +% ========================================================================= +N = 1e3; + +%% Definition of the model +% ========================================================================= +% simple Rescorla-Wagner model +f_fname = @f_rwl; +g_fname = @g_Id; + +%% Simulation +% ========================================================================= +% parameters +x0 = nan; % random initial state +theta = 0.2; % learning rate +phi = []; +alpha = Inf; +sigma = 1e2; + +% options +options = struct (); + +% feedback: random positive of negative feedbacks +fb.h_fname = @h_Id; +fb.inH.u = 2 * VBA_random ('Bernoulli', 0.5, 1, N) - 1; +fb.indy = []; +fb.indfb = 1; % where to store feedbacks in u + +% inputs will be fed in by the feedback +u = nan(1, N); + +% simulation routine +[y,x,x0,eta,e,u] = VBA_simulate (N,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0,fb); + +%% Estimation +% ========================================================================= +% dimensions of the problem +dim.n = 1; +dim.n_theta = 1; +dim.n_phi = 0; + +% inversion routine +[posterior, out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); + +%% Volterra analysis +% ========================================================================= +% compute kernels (NB: you could also find them in out.diagnostics) +maxLag = 16; +kernels = VBA_getVolterraKernels (posterior, out, maxLag); + +% compute influence of feedbacks on responses +theoretical = theta * (1 - theta) .^ (0 : maxLag); +empirical.mean = kernels.y.m'; +empirical.var = kernels.y.v'; + +% display +f = VBA_figure('Name', 'Influence of feedback on fugzre responses'); +plotUncertainTimeSeries(empirical.mean,sqrt(empirical.var),1, f); +hold on +plot(theoretical,'b') +legend({'estimate (Volterra weight)','credible interval','theoretical'}) +box off +xlabel('lag') +ylabel('Influence of input') + diff --git a/subfunctions/demo_delays.m b/demos/1_advanced/demo_delays.m similarity index 95% rename from subfunctions/demo_delays.m rename to demos/1_advanced/demo_delays.m index 786f6e0e..73b6a2bf 100644 --- a/subfunctions/demo_delays.m +++ b/demos/1_advanced/demo_delays.m @@ -74,10 +74,10 @@ dim.n = 2; % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % Call inversion routine [posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); diff --git a/demos/1_advanced/demo_designOptimization.m b/demos/1_advanced/demo_designOptimization.m new file mode 100644 index 00000000..a79d07d8 --- /dev/null +++ b/demos/1_advanced/demo_designOptimization.m @@ -0,0 +1,297 @@ +function demo_designOptimization () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_designOptimization () +% demo of off-line and online design optimisation +% +% This demo simulates a psychophysics paradigm similar to a signal +% detection task, whereby the detection probability is a sigmoidal function +% of the stimulus contrast (which is the design control 
variable). +% Our goal is to estimate the inflexion point (detection threshold) and the +% sigmoid steepness (d prime) or the response function. +% In order to provide the most efficient estimate of these model parameters, +% we will show how to use offline (before the experiment) and online +% (during the experiment) design optimization to decide given trial-by-trial +% subjects' binary choice data (seen/unseen) which stimulus to show next. +% +% ///////////////////////////////////////////////////////////////////////// + +%% Global values +% ========================================================================= + +% number of trials for the respective simulations +N = 50; + +% true parameter values for the simulations +phi = [- .5; 2.5]; % simulated parameters: [inflexion point, log-slope] + +% range of potential simuli +uRange = linspace(- 1, 1, N); + +% prepare display +VBA_figure('Name', 'demo_designOptimisation'); + +%% Define the model +% ========================================================================= + +% observation function +% ------------------------------------------------------------------------- +function [gx, dgdx, dgdp] = g_psychometric (~, phi, u, ~) + % tip: VBA_sigmoid returns derivatives wrt parameters in alphabetical + % order, ie. center -> slope. + [gx, ~, dgdp] = VBA_sigmoid (u,... + 'center', phi(1), ... + 'slope', exp (phi(2)) ... + ); + dgdp(2,:) = dgdp(2,:) * exp (phi(2)); + dgdx = []; +end + +% dimensions +% ------------------------------------------------------------------------- +dim.n_phi = 2; +dim.n_t = 1; +dim.p = N; + +% general options +% ------------------------------------------------------------------------- +% binary observations +options.sources.type = 1; + +% no display +options.DisplayWin = 0; +options.verbose = 0; + +%% 1) No optimisation +% ========================================================================= +% here, we'll use the most naive approach, a full swipe of all possible +% stimulus intensities + +% experimental design +% ------------------------------------------------------------------------- +u = uRange'; + +% simulate responses +% ------------------------------------------------------------------------- +y = VBA_simulate (1,[],@g_psychometric,[],phi,u,[],[],options); + +% estimate parameters +% ------------------------------------------------------------------------- +posterior_naive = VBA_NLStateSpaceModel (y,u,[],@g_psychometric,dim,options); + +% display results +% ------------------------------------------------------------------------- +plot_design(1, 'no optimisation', u, y, posterior_naive); + +%% 2) Offline optimisation +% ========================================================================= +% Here, we will try to find a better design before running the experiment + +% experimental design +% ------------------------------------------------------------------------- +% number of designs to try +nAttempts = 1e4; + +% initialization +fprintf('Offline optimisation: optimizing ( 0%%)'); +efficiency_offOpt = - Inf; +efficiency = nan(1, length (uRange)); + +% loop over designs +for attempt = 1 : nAttempts + + % draw random design + u_attempt = uRange(randi (numel (uRange), 1, N))'; + + % estimate efficiency + efficiency(attempt) = VBA_designEfficiency([],@g_psychometric,dim,options,u_attempt,'parameters'); + + % if better, store and display + if efficiency(attempt) > efficiency_offOpt + efficiency_offOpt = efficiency(attempt); + u = sort(u_attempt); + plot_design(2, 'offline optimisation', u, y, []); + end + + 
% progress bar + fprintf('\b\b\b\b\b%3d%%)', round(100* attempt / nAttempts)); +end + +% simulate responses +% ------------------------------------------------------------------------- +y = VBA_simulate (1,[],@g_psychometric,[],phi,u,[],[],options); + +% estimate parameters +% ------------------------------------------------------------------------- +posterior_offline = VBA_NLStateSpaceModel (y,u,[],@g_psychometric,dim,options); + +% display results +% ------------------------------------------------------------------------- +plot_design(2, 'offline optimisation', u, y, posterior_offline); + +%% 3) Online optimisation +% ========================================================================= +% Here, we will optimize the design during the experiment by taking into +% account trial-by-trial subject's responses to adaptively select the next +% best stimulus to present + +% initialization +opt = options; +u = nan(N, 1); +efficiency = nan(1, length (uRange)); + +% run experiment +for t = 1 : N + + % extend design + % --------------------------------------------------------------------- + % start from current posterior belief + try + opt.priors = posterior_online; + end + + % compute efficiency of potential stimuli + dim.p = 1; + for i = 1 : length (uRange) + efficiency(i) = VBA_designEfficiency([],@g_psychometric,dim,opt,uRange(i),'parameters'); + end + + % find best next stimulus to present + [~, idxMaxEff] = max (efficiency); + u(t) = uRange(idxMaxEff); + + % simulate 1 responses + % --------------------------------------------------------------------- + y(t) = VBA_simulate (1,[],@g_psychometric,[],phi,u(t),Inf,[],options); + + % estimate parameters given data acquired so far + % --------------------------------------------------------------------- + dim.p = t; + posterior_online = VBA_NLStateSpaceModel(y(1:t),u(1:t),[],@g_psychometric,dim,options); + + % display + % --------------------------------------------------------------------- + plot_design(3, 'online optimisation', u, y, posterior_online, efficiency); + +end + +% display +% --------------------------------------------------------------------- +plot_design(3, 'online optimisation', u, y, posterior_online); + +%% show results +% ========================================================================= + +fprintf('\nSimulation results:\n'); + +disp (table ( ... + phi, ... + posterior_naive.muPhi, ... + posterior_offline.muPhi, ... + posterior_online.muPhi, ... + 'RowNames', {'center','slope'}, ... + 'VariableNames',{'true','naive','offline','online'})); + +%% ######################################################################## +% display subfunction +% ######################################################################## + +function plot_design(idx, titleTxt, u, y, posterior, uEfficiency) + + % jitter for data display + persistent jitter; + if isempty(jitter) + jitter = 0.1 * (rand(N,1)-0.5); + end + + % experimental design + subplot(3,3,idx) + + if nargin == 6 % if efficiency given + + [ax,h1,h2] = plotyy(u,10,uRange,uEfficiency,@myHistogram,@myPlot); + xlim([uRange(1)-0.05, uRange(end)+0.05]); + set(get(ax(1), 'YLabel'), 'String', 'freq. of presentation') + set(get(ax(2), 'YLabel'), 'String', 'efficiency') + set(ax(1),'YLim', [0 0.4],'YTick',0:.2:.4) + xlabel('stimulus intensity') + box off + + else + + % show stimuli density + histogram(u, 10, 'EdgeColor','none','FaceColor',[.3 .3 .4],'Normalization','probability'); + xlim([uRange(1)-0.05, uRange(end)+0.05]) + ylim([0 0.4]) + xlabel('stimulus intensity') + ylabel('freq. 
of presentation') + box off + end + % show type of optimisation + VBA_title(gca,titleTxt); + + % show results if any + if ~isempty(posterior) + + % + observations + + subplot(3,3,3+idx) + % predictions + opt_plot = options; + opt_plot.priors = posterior; + dim_opt = dim; + dim_opt.p = numel(uRange); + muy = VBA_getLaplace(uRange',[],@g_psychometric,dim_opt,opt_plot); + plot(uRange,muy,'r','LineWidth',2); + % true model + hold on + plot(uRange,g_psychometric([],phi,uRange),'Color',[0 .8 0],'LineWidth',2); + % data + plot(u,y+jitter(1:numel(y)),'.k'); + + % options + ylim([-0.1 1.1]) + xlim([uRange(1)-0.05, uRange(end)+0.05]) + xlabel('stimulus intensity') + ylabel('prob. of detection') + hold off + box off + if idx == 1 + text(.2,.6,'true model','Color',[0 .8 0]); + text(.2,.4,'observations','Color','k'); + text(.2,.2,'predicted','Color','r'); + end + + % + parameters + + subplot(3,3,6+idx); + + % posterior estimates + plotUncertainTimeSeries(posterior.muPhi,sqrt(diag(posterior.SigmaPhi)),[],gca); + % true values + hold on + plot(phi,'o','MarkerFaceColor',[0 .8 0], 'MarkerEdgeColor',[0 .8 0]); + % options + set(gca,'XTickLabel',{'center','slope'}) + ylim([-2 4]) + xlabel('parameter') + ylabel('posterior estimate') + hold off + box off + end + + drawnow +end + + function h = myHistogram (x,y) + h = histogram(x,y,'EdgeColor','none','FaceColor',[.3 .3 .4],'Normalization','probability'); + end + + function h = myPlot (x,y) + h = plot(x,y); + hold on + [mE, iE] = max (y); + plot(uRange(iE),mE,'o','MarkerFaceColor',[.3 .3 .3],'MarkerEdgeColor',[.3 .3 .3]) + hold off + end +end diff --git a/demos/1_advanced/demo_noiseAR1.m b/demos/1_advanced/demo_noiseAR1.m new file mode 100644 index 00000000..d7bba62a --- /dev/null +++ b/demos/1_advanced/demo_noiseAR1.m @@ -0,0 +1,156 @@ +function [posterior, out] = demo_noiseAR1 () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_noiseAR1 () +% Demo for linear system with auto-regressive AR(1) state noise +% +% The class of generative models that the toolbox can deal with is actually +% more general that may appear at first glance. In fact, it can deal with +% any form of of auto-regressive or state-dependant noises (both at the +% level of hidden states and observations). In most instances, it suffices +% to construct an augmented state space X, where Xt = (xt,zt) and +% appropriately modify the evolution and observation functions, as well as +% the priors. In the case of AR(1) noise, this could be implemented as +% follows: +% $$ +% X_t+1 = [ f(x_t,theta,u_t) + z_t ; z_t ] + eta_t +% y_t = g(x_t,phi) + e_t +% $$ +% where the f is the original evolution function. +% Note that both AR(1) and white noise (respectively z_t and eta_t) can +% drive the system. To ensure that z is the most likely perturbation force +% on the system, one can define the augmented state noise covariance matrix +% Qx (of eta) such that its upper-left half is effectively zero. +% In addition, one may have to increase the lag k of the estimation +% procedure. This is because the effect of the AR(1) retarded state noise +% on the hidden states is maximal one time step in the future. One thus +% need to look back one time step in the past to infer on the retarded +% state noise. 
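To make the augmented state space concrete: the demo below uses the toolbox's f_embedAR / g_embedAR wrappers, but the embedding amounts to something like the following sketch (hypothetical f_augmentedAR, written here only to illustrate the equation above; the fields in.dim.n, in.opt.f_fname and in.opt.inF mirror the option structure built in this demo):

    % illustrative sketch only (not the toolbox's f_embedAR)
    function FX = f_augmentedAR (X, theta, u, in)
        n  = in.dim.n;                   % dimension of the native state x
        x  = X(1 : n);                   % native hidden states
        z  = X(n + 1 : 2 * n);           % AR(1) state-noise components
        fx = feval (in.opt.f_fname, x, theta, u, in.opt.inF);  % native evolution f(x_t)
        FX = [fx + z; z];                % X_{t+1} = [f(x_t) + z_t ; z_t]
    end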
+% +% ///////////////////////////////////////////////////////////////////////// + +% number of observations +N = 1e2; + +% timestep +dt = 1e-1; + +%% Specify the model +% ========================================================================= +% we use the generic embeding functions for AR(1) noise: the original +% evolution and observation function are passed as options to the embeding +% scheme model + +% embedding function +% ------------------------------------------------------------------------- +f_fname = @f_embedAR; % this is an AR(1) embedding evolution function +g_fname = @g_embedAR; % this is an AR(1) embedding observation function + +% definition of the native model +% ------------------------------------------------------------------------- + +% evolution and observation functions +in.opt.f_fname = @f_lin2D; +in.opt.g_fname = @g_Id; + +% options structures +in.opt.inF.deltat = dt; +in.opt.inF.b = 5e-1; + +% dimension +in.dim.n = 2; +in.dim.n_theta = 1; +in.dim.n_phi = 0; +in.dim.p = 2; +in.dim.n_t = N; + +% trigger the initialization of priors and options for a stochastic system +u = zeros(1,N); +in.opt.priors.a_alpha = 1; +in.opt.priors.b_alpha = 1; +in.opt = VBA_check([],u,in.opt.f_fname,in.opt.g_fname,in.dim,in.opt); + +%% Simulate data +% ========================================================================= + +% Parameters of the simulation +x0 = cat(1,[0; 0], [0; 0]); % [x_t; z_t] +theta = 1; +phi = []; +alpha = 1e1; +sigma = 1e-1; + +% pass on option sructures +options.inF = in; +options.inG = in; + +% % increase precision on non-AR(1) state noise component and therefore +% % increase the weight of z_t on system innovation +% % ------------------------------------------------------------------------- +% iQxt = 1e2 * eye (in.dim.n); % precision of eta_t component added to x_t +% iQzt = eye (in.dim.n); % precision of eta_t component added to z_t +% iQx = blkdiag (iQxt, iQzt); +% +% for t = 1 : N +% options.priors.iQx{t} = iQx; +% end + +% Build time series of hidden states and observations +[y,x,x0,eta,e] = VBA_simulate (N,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); + +% display time series of hidden states and observations +displaySimulations(y,x,eta,e); + +%% Inversion: accounting for AR(1) noise +% ========================================================================= + +% dimension of the problem +dim = in.dim; +dim.n = 2 * dim.n; % [x_t; z_t] + +% stochastic evolution +options.priors.a_alpha = 1; +options.priors.b_alpha = 1; + +% shrinkage prior on X0 +options.priors.SigmaX0 = 1e-1 * eye(dim.n); + +% lag of the inversion scheme +options.backwardLag = 16; + +% Call inversion routine with AR(1) priors on state noise +[posterior.AR, out.AR] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); + +% Display results +[h(1), h(2)] = displayResults(posterior.AR,out.AR,y-e,x,x0,theta,phi,alpha,sigma); +h(3) = out.AR.options.hf; +set (h, 'Name', 'Results: AR(1) noise'); + + +%% Inversion: white noise only +% ========================================================================= +% now we invert data without AR(1) priors on state noise + +% use native dynamics +f_fname = @f_lin2D; +g_fname = @g_Id; +% no state expansion +dim.n = in.dim.n; +% undo embedding +options.inF = in.opt.inF; +options.inG = in.opt.inG; +% reset priors on state innovation +for t = 1 : N + options.priors.iQx{t} = eye(2); +end +% shrinkage prior on X0 +options.priors.SigmaX0 = 1e-1 * eye(dim.n); + +% Call classical inversion routine (white noise) +[posterior.WN,out.WN] = 
VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); + +% Display results +[h(1), h(2)] = displayResults(posterior.WN,out.WN,y-e,x(1:2,:),x0(1:2),theta,phi,alpha,sigma); +h(3) = out.WN.options.hf; +set (h, 'Name', 'Results: white noise'); + diff --git a/demos/1_advanced/demo_stochasticModel.m b/demos/1_advanced/demo_stochasticModel.m new file mode 100644 index 00000000..04593185 --- /dev/null +++ b/demos/1_advanced/demo_stochasticModel.m @@ -0,0 +1,77 @@ +function [posterior, out] = demo_stochasticModel () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_stochasticModel () +% Demo for stochastic system with binomial output +% +% This demo simulates and inverts a model of a stochastic dynamical system, +% which is observed through a nonlinear sigmoid mapping (binary +% observations). +% +% ///////////////////////////////////////////////////////////////////////// + +% number of observations +N = 1e2; + +%% Specify the model +% ========================================================================= + +% AR(1) evolution function +function [fx, dfdx, dfdp] = f_evolution(x, ~, ~, ~) + fx = x; + dfdx = eye(length(x)); + dfdp = []; +end + +% binary observations from a biased sigmoid mapping +function [gx, dgdx, dgdp] = g_observation(x, P, ~, in) + [gx, dgdx, dsdp] = VBA_sigmoid(x,'slope',exp (P), in, 'derivatives', {'slope'}); + dgdx = dgdx'; + dgdp = dsdp * exp (P); +end + +% bias of the sigmoid mapping +options.inG.center = randn(4,1); + +% binary observations +options.sources.type = 1; + +%% Simulate data +% ========================================================================= + +% precision of stochastic innovation +alpha = 1e1; +% parameters +theta = []; +phi = 0.5; +x0 = 0; + +% simulate +[y,x,x0,eta,e] = VBA_simulate (N,@f_evolution,@g_observation,theta,phi,[],alpha,[],options,x0); + +% display time series of hidden states and observations +displaySimulations(y,x,eta,e); + +%% Inversion +% ========================================================================= +% override default priors (deterministic -> stochastic evolution) +options.priors.a_alpha = 1; +options.priors.b_alpha = 1; + +% window of the hidden state estimation filtering +options.backwardLag = 5; + +% dimensions of the problem +dim.n_theta = 0; +dim.n_phi = 1; +dim.n = 1; + +% inversion routine +[posterior, out] = VBA_NLStateSpaceModel(y,[],@f_evolution,@g_observation,dim,options); + +%% Results +% ========================================================================= +% Display results +displayResults(posterior,out,y-e,x,x0,theta,phi,alpha,[]); + +end \ No newline at end of file diff --git a/subfunctions/demo_susceptibility.m b/demos/1_advanced/demo_susceptibility.m similarity index 97% rename from subfunctions/demo_susceptibility.m rename to demos/1_advanced/demo_susceptibility.m index e70df2f2..01f32cec 100644 --- a/subfunctions/demo_susceptibility.m +++ b/demos/1_advanced/demo_susceptibility.m @@ -47,7 +47,7 @@ for i=1:N disp(i) u = randn(4,dim.n_t); - [y,x,x0,eta,e] = simulateNLSS(dim.n_t,[],g_fname,[],phi,u,[],sigma,options,zeros(dim.n,1)); + [y,x,x0,eta,e] = VBA_simulate (dim.n_t,[],g_fname,[],phi,u,[],sigma,options,zeros(dim.n,1)); [posterior,out] = VBA_NLStateSpaceModel(y,u,[],g_fname,dim,options); % displayResults(posterior,out,y-e,[],[],[],phi,sigma,[]) result(i) = VBA_susceptibility(posterior,out); diff --git a/subfunctions/demo_trainTest.m b/demos/1_advanced/demo_trainTest.m similarity index 94% rename from subfunctions/demo_trainTest.m rename 
to demos/1_advanced/demo_trainTest.m index 1fa81da5..e1630034 100644 --- a/subfunctions/demo_trainTest.m +++ b/demos/1_advanced/demo_trainTest.m @@ -11,7 +11,7 @@ g_fname = @g_classif; -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; options.priors.muPhi = zeros(dim.n_phi,1); options.priors.SigmaPhi = 1e0*eye(dim.n_phi); options.DisplayWin = 0; @@ -37,7 +37,7 @@ % pick a model at random (as well as its parameters) options.inG.X = randn(dim.n_phi-1,dim.p); phi = randn(dim.n_phi,1); - [y,x,x0,eta,e] = simulateNLSS(dim.n_t,[],g_fname,[],phi,[],[],[],options,[]); + [y,x,x0,eta,e] = VBA_simulate (dim.n_t,[],g_fname,[],phi,[],[],[],options,[]); g = y-e; g = g>0.5; % denoised data % 4-fold train/test @@ -127,7 +127,7 @@ title(ha,'V[acc(train)]-V[acc(test)] (balanced)') -getSubplots +VBA_getSubplots (); diff --git a/VBA_LinDecomp.m b/demos/2_statistics/VBA_LinDecomp.m similarity index 95% rename from VBA_LinDecomp.m rename to demos/2_statistics/VBA_LinDecomp.m index 06bdc983..39265816 100644 --- a/VBA_LinDecomp.m +++ b/demos/2_statistics/VBA_LinDecomp.m @@ -34,7 +34,7 @@ % set priors and invert model options.priors.muPhi = 1e-3*ones(dim.n_phi,1); options.priors.SigmaPhi = 1e2*eye(dim.n_phi); -[posterior,out] = VBA_hyperparameters(vec(Y),[],[],g_fname,dim,options); +[posterior,out] = VBA_hyperparameters(VBA_vec(Y),[],[],g_fname,dim,options); % recover A and B matrices from VBA posterior pdf A = NaN(n,k); B = NaN(k,p); @@ -79,7 +79,7 @@ SS_err = sum((Y(:)-Yhi(:)).^2); R2i = 1-(SS_err/SS_tot); dR2(i) = out.fit.R2 - R2i; - yhi(:,i) = vec(A(:,i)*B(i,:)); + yhi(:,i) = VBA_vec(A(:,i)*B(i,:)); end ha = subplot(3,2,5,'parent',hf); hb = bar(1:k,dR2); @@ -97,7 +97,7 @@ colorbar('peer',ha) title(ha,'components'' correlation') end - getSubplots + VBA_getSubplots (); end diff --git a/subfunctions/demo_CI.m b/demos/2_statistics/demo_CI.m similarity index 92% rename from subfunctions/demo_CI.m rename to demos/2_statistics/demo_CI.m index f73a7816..319af709 100644 --- a/subfunctions/demo_CI.m +++ b/demos/2_statistics/demo_CI.m @@ -27,7 +27,7 @@ options.priors = post; options.priors = rmfield(options.priors,'iQy'); sx = std(X); -X0 = vec(min(X)-3*sx:1e-1:max(X)+3*sx); % extend the postdiction outside domain of fitted data +X0 = VBA_vec(min(X)-3*sx:1e-1:max(X)+3*sx); % extend the postdiction outside domain of fitted data dim.p = length(X0); dim.n_t = 1; options.inG.X = [X0,ones(dim.p,1)]; @@ -37,7 +37,7 @@ % Get classical CI from sampling the posterior density N = 1e4; -sV = VBA_getISqrtMat(post.SigmaPhi,0); +sV = VBA_sqrtm (post.SigmaPhi); phi = repmat(post.muPhi,1,N) + sV*randn(2,N); ev = post.b_sigma./post.a_sigma; E = 0;% sqrt(ev)*randn(length(X0),N); % add in predicted residuals? 
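The posterior sampling step used just above (posterior mean plus a matrix square root of the posterior covariance times standard normal draws) can be checked with plain MATLAB, independently of the toolbox; the numbers below are made up for illustration.

    % plain-MATLAB sketch: correlated Gaussian samples via a matrix square root
    mu    = [1; -2];                        % some posterior mean
    Sigma = [1, 0.6; 0.6, 0.5];             % some posterior covariance (PSD)
    N     = 1e4;
    S     = sqrtm (Sigma);                  % symmetric matrix square root
    X     = repmat (mu, 1, N) + S * randn (2, N);
    disp (cov (X'));                        % empirical covariance ~ Sigma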
@@ -67,4 +67,4 @@ title(ha,'postdictive standard deviation') set(ha,'xlim',[min(X0),max(X0)]) -getSubplots +VBA_getSubplots (); diff --git a/demos/statistics/demo_GLM.m b/demos/2_statistics/demo_GLM.m similarity index 100% rename from demos/statistics/demo_GLM.m rename to demos/2_statistics/demo_GLM.m diff --git a/stats&plots/demo_GLM_missingData.m b/demos/2_statistics/demo_GLM_missingData.m similarity index 96% rename from stats&plots/demo_GLM_missingData.m rename to demos/2_statistics/demo_GLM_missingData.m index 005ecffc..fb14eff5 100755 --- a/stats&plots/demo_GLM_missingData.m +++ b/demos/2_statistics/demo_GLM_missingData.m @@ -34,7 +34,7 @@ options.inG = inG; [posterior,out] = VBA_NLStateSpaceModel(y,[],[],g_fname,dim,options); -phi = [b;vec(X(inG.xmd))]; +phi = [b;VBA_vec(X(inG.xmd))]; displayResults(posterior,out,g,[],[],[],phi,1/s.^2,[]); diff --git a/subfunctions/demo_KalmanSmoother.m b/demos/2_statistics/demo_KalmanSmoother.m similarity index 97% rename from subfunctions/demo_KalmanSmoother.m rename to demos/2_statistics/demo_KalmanSmoother.m index 08b3d446..2aa02e6a 100644 --- a/subfunctions/demo_KalmanSmoother.m +++ b/demos/2_statistics/demo_KalmanSmoother.m @@ -20,7 +20,7 @@ alpha = 1/var(eta); sigma = 1/var(e); -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); f_fname = @f_AR; diff --git a/subfunctions/demo_LinDecomp.m b/demos/2_statistics/demo_LinDecomp.m similarity index 100% rename from subfunctions/demo_LinDecomp.m rename to demos/2_statistics/demo_LinDecomp.m diff --git a/stats&plots/demo_RFT.m b/demos/2_statistics/demo_RFT.m similarity index 98% rename from stats&plots/demo_RFT.m rename to demos/2_statistics/demo_RFT.m index f26cf423..cfea6648 100644 --- a/stats&plots/demo_RFT.m +++ b/demos/2_statistics/demo_RFT.m @@ -53,6 +53,6 @@ plot(ha,sqrt(v*8*log(2)),sqrt(v*8*log(2)),'k--') xlabel(ha,'smoothing kernel FWHM') ylabel(ha,'RFT-FWHM') -getSubplots +VBA_getSubplots (); %save RFT.mat \ No newline at end of file diff --git a/stats&plots/demo_RFT_GLM.m b/demos/2_statistics/demo_RFT_GLM.m similarity index 98% rename from stats&plots/demo_RFT_GLM.m rename to demos/2_statistics/demo_RFT_GLM.m index f9db29ac..303145ff 100644 --- a/stats&plots/demo_RFT_GLM.m +++ b/demos/2_statistics/demo_RFT_GLM.m @@ -56,4 +56,4 @@ plot(ha,sqrt(v*8*log(2)),sqrt(v*8*log(2)),'k--') xlabel(ha,'smoothing kernel FWHM') ylabel(ha,'RFT-FWHM') -getSubplots \ No newline at end of file +VBA_getSubplots (); \ No newline at end of file diff --git a/subfunctions/demo_covComp.m b/demos/2_statistics/demo_covComp.m similarity index 91% rename from subfunctions/demo_covComp.m rename to demos/2_statistics/demo_covComp.m index 28ac5187..b635e36e 100644 --- a/subfunctions/demo_covComp.m +++ b/demos/2_statistics/demo_covComp.m @@ -18,12 +18,12 @@ theta = 0; for j=1:length(Qp) - theta = theta + exp(mp(j))*VBA_getISqrtMat(Qp{j},0)*randn(4,1); + theta = theta + exp(mp(j))*VBA_sqrtm(Qp{j})*randn(4,1); end e = 0; for i=1:length(Qy) - e = e + exp(my(i))*VBA_getISqrtMat(Qy{i},0)*randn(24,1); + e = e + exp(my(i))*VBA_sqrtm(Qy{i})*randn(24,1); end y = X*theta + e; diff --git a/stats&plots/demo_generalizability.m b/demos/2_statistics/demo_generalizability.m similarity index 96% rename from stats&plots/demo_generalizability.m rename to demos/2_statistics/demo_generalizability.m index 20b98b1c..bf0ba5a6 100644 --- a/stats&plots/demo_generalizability.m +++ b/demos/2_statistics/demo_generalizability.m @@ -17,7 +17,7 @@ C = 0.5*ones(2*n,2*n) + 0.5*eye(2*n); -N = 32; % # Monte-Carlo samples +N = 20; % # Monte-Carlo 
samples for ii=1:N % simulate data @@ -36,7 +36,7 @@ options.verbose = 0; options.inG.X = X_train(:,1:i); options.priors = []; - options.priors.SigmaPhi = 1e4*eye(i); + options.priors.SigmaPhi = 1e2*eye(i); dim.n = 0; dim.n_theta = 0; dim.n_phi = i; @@ -71,6 +71,6 @@ errorbar(mean(BIC'),std(BIC')./sqrt(N),'parent',ha,'marker','.','color','m'); plot([n,n],get(ha,'ylim'),'k--') legend(ha,{'test Log-Likelihood','train Log-Likelihood','Free Energy','AIC','BIC'}) - +ylim([-500, 0]) diff --git a/subfunctions/demo_groupbtw.m b/demos/2_statistics/demo_groupbtw.m similarity index 86% rename from subfunctions/demo_groupbtw.m rename to demos/2_statistics/demo_groupbtw.m index 8a063f9d..772055a2 100755 --- a/subfunctions/demo_groupbtw.m +++ b/demos/2_statistics/demo_groupbtw.m @@ -25,14 +25,16 @@ j./length(dr) for ii=1:Nmcmc % sample 2-tuple families (homogeneous vs heterogeneous conditions) - [f] = sampleFromArbitraryP(r0+[dr(j);-dr(j)],[1;2],N); + f = VBA_random ('Categorical', r0 + [dr(j); -dr(j)], N, 1); + % sample model evidences for each model, subject and condition L = zeros(2,N,2); % 2xNx2 (2 models; N subjects; 2 conditions). for i=1:N i1 = floor(1+rand*(Ndummy-1)); i2 = floor(1+rand*(Ndummy-1)); % pick a model at random - [m] = sampleFromArbitraryP([0.5;0.5],[1;2],1); + m = VBA_random ('Categorical', [0.5; 0.5]); + % assign LEVs, given the subject's class if f(i)==1 % homogeneous condition L(:,i,1) = L0(:,i1,m); @@ -79,22 +81,22 @@ ip = find(dr>0); in = find(dr<0); pr = 1; -y0 = vec(ep(:,in)); -y1 = vec(ep(:,ip)); -[h0(:,1),g0(:,1)] = empiricalHist(y0,pr); -[h1(:,1),g1(:,1)] = empiricalHist(y1,pr); +y0 = VBA_vec(ep(:,in)); +y1 = VBA_vec(ep(:,ip)); +[h0(:,1),g0(:,1)] = VBA_empiricalDensity(y0,pr); +[h1(:,1),g1(:,1)] = VBA_empiricalDensity(y1,pr); [proc(1),out(1),hf] = doROC(y1,y0); set(hf,'name','ROC analysis: EP') -y0 = vec(1-bor(:,in)); -y1 = vec(1-bor(:,ip)); -[h0(:,2),g0(:,2)] = empiricalHist(y0,pr); -[h1(:,2),g1(:,2)] = empiricalHist(y1,pr); +y0 = VBA_vec(1-bor(:,in)); +y1 = VBA_vec(1-bor(:,ip)); +[h0(:,2),g0(:,2)] = VBA_empiricalDensity(y0,pr); +[h1(:,2),g1(:,2)] = VBA_empiricalDensity(y1,pr); [proc(2),out(2),hf] = doROC(y1,y0); set(hf,'name','ROC analysis: BOR') -y0 = vec(pep(:,in)); -y1 = vec(pep(:,ip)); -[h0(:,3),g0(:,3)] = empiricalHist(y0,pr); -[h1(:,3),g1(:,3)] = empiricalHist(y1,pr); +y0 = VBA_vec(pep(:,in)); +y1 = VBA_vec(pep(:,ip)); +[h0(:,3),g0(:,3)] = VBA_empiricalDensity(y0,pr); +[h1(:,3),g1(:,3)] = VBA_empiricalDensity(y1,pr); [proc(3),out(3),hf] = doROC(y1,y0); set(hf,'name','ROC analysis: protected EP') hf = figure('color',[1 1 1],'name','group-BMC summary statistics: ROC analysis'); @@ -140,5 +142,5 @@ set(ha,'xlim',[0,1],'ylim',[0,1]) xlabel(ha,'sampled P(r1>r0)') ylabel(ha,'evaluated P(r1>r0|y)') -getSubplots +VBA_getSubplots (); diff --git a/demos/statistics/demo_logisticRegression.m b/demos/2_statistics/demo_logisticRegression.m similarity index 93% rename from demos/statistics/demo_logisticRegression.m rename to demos/2_statistics/demo_logisticRegression.m index 4ecec922..2537b79a 100644 --- a/demos/statistics/demo_logisticRegression.m +++ b/demos/2_statistics/demo_logisticRegression.m @@ -35,7 +35,7 @@ % Specify distribution for binary data (default is gaussian) % ------------------------------------------------------------------------- -options.binomial = 1; +options.sources.type = 1; % Simulate artificial data % ========================================================================= @@ -44,7 +44,7 @@ phi = ones(nRegressor,1); % basic example % Simulated 
observations -[y,x,x0,eta,e] = simulateNLSS(n,[],g_fname,[],phi,u,[],[],options); +[y,x,x0,eta,e] = VBA_simulate (n,[],g_fname,[],phi,u,[],[],options); % display time series of hidden states and observations figure, @@ -55,7 +55,7 @@ % Run the estimation % ========================================================================= -% Call inversion routine +% Call inversion routines [posterior,out] = VBA_NLStateSpaceModel(y,u,[],g_fname,dim,options); % Display results diff --git a/stats&plots/demo_mediation.m b/demos/2_statistics/demo_mediation.m similarity index 100% rename from stats&plots/demo_mediation.m rename to demos/2_statistics/demo_mediation.m diff --git a/subfunctions/demo_redundancy.m b/demos/2_statistics/demo_redundancy.m similarity index 97% rename from subfunctions/demo_redundancy.m rename to demos/2_statistics/demo_redundancy.m index 39ced0a3..62672500 100755 --- a/subfunctions/demo_redundancy.m +++ b/demos/2_statistics/demo_redundancy.m @@ -33,7 +33,7 @@ s1(i) = p1.SigmaPhi; m2(i) = ones(2,1)'*p2.muPhi; s2(i) = ones(2,1)'*p2.SigmaPhi*ones(2,1); - c = cov2corr(p2.SigmaPhi); + c = VBA_cov2corr(p2.SigmaPhi); c2(i) = c(1,2); end diff --git a/demos/2_statistics/demo_sparsityPrior.m b/demos/2_statistics/demo_sparsityPrior.m new file mode 100644 index 00000000..40e0db91 --- /dev/null +++ b/demos/2_statistics/demo_sparsityPrior.m @@ -0,0 +1,178 @@ +function demo_sparsityPrior () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% demo_sparsityPrior () +% demo of penalized regression (Bayesian LASSO) +% +% This demo shows how to perform a GLM regression using a L1-norm, L2-norm, +% or an adaptive norm regularization. +% Critically, we vary the number of "dummy" regressors (that are not used +% to generate the data), and assess how this level of sparsity affects the +% accuracy of the respective regularized regressions. +% +% Background: +% ~~~~~~~~~~~ +% +% When the number of parameters to estimate is higher than the number of +% data points, it can be usefull to apply sparsity constraints during model +% fitting. In general, this is done by defining a penalty term (eg the l1- +% or l2-norm) that help shrinking the number of effective parameters, as +% for example in the LASSO estimator or elastic nets. +% Here, we demonstrate how applying a simple transformation on the +% parameters allows us to emulate l1-regularization in a regression, and +% therefore perform a Bayesian LASSO estimation. +% +% See arXiv:1703.07168 for more details. 
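The parameter transformation alluded to here can be made explicit with a small plain-MATLAB sketch. Assuming, for illustration, that the sparsify mapping has the form s = sign(x).*abs(x).^k (see VBA_sparsifyPrior for the actual implementation), a standard Gaussian prior on the native parameter x induces an L1-type penalty on the effective weight s when k = 2:

    x = -4 : 0.01 : 4;                    % native parameter, Gaussian (L2) prior
    s = sign (x) .* abs (x) .^ 2;         % effective regression weight (assumed mapping)
    penalty_x = x .^ 2 / 2;               % quadratic penalty implied by the Gaussian prior
    penalty_s = abs (s) / 2;              % identical values, now read as a function of s
    disp (max (abs (penalty_x - penalty_s)));   % 0: the two penalties coincide
    plot (s, penalty_s);                  % linear in |s|: LASSO-like shrinkage
    xlabel ('effective weight s'); ylabel ('penalty');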
+% +% ///////////////////////////////////////////////////////////////////////// + +% Illustrating the transformation +% ========================================================================= +displaySparsifyTransform () + +% Parameters of the demo +% ========================================================================= +% number of observations (data points) +N = 16; +% total number of regressors +K = 32; +% number of useless regressors (0 weight) +sparsity = 16 : 3 : K ; +% number of Monte - Carlo simulations for each sparsity level +M = 30; + +% observation precision +sigma = 10; + +% options +options.DisplayWin = 0; +options.verbose = 0; + +% main loop +% ========================================================================= +% loop over different levels of sparsity underlying the data +for i = 1 : numel(sparsity) + % do multiple times + for m = 1 : M + + % simulate data + % ----------------------------------------------------------------- + % design matrix (set or regressors) + X = randn (N, K); + % weight of the regressors + phi = [ones(K - sparsity(i), 1) ; zeros(sparsity(i), 1)]; + % generate random observations + y = X * phi + VBA_random ('Gaussian', 0, 1 / sigma, N, 1); + % store for inversion + options.inG.X = X; + + % L1 estimator (emulated Laplace priors) + % ----------------------------------------------------------------- + dims.n_phi = K; + [p1, o1] = VBA_NLStateSpaceModel (y, [], [], @g_GLMsparse, dims, options); + % recover transformed parameters + phiHat = VBA_sparsifyPrior (p1.muPhi); + accuracy(2, i, m) = myCorr (phi, phiHat); + + % L2 estimator (usual Gaussian priors) + % ----------------------------------------------------------------- + dims.n_phi = K; + [p2, o2] = VBA_NLStateSpaceModel (y, [], [], @g_GLM, dims, options); + phiHat = p2.muPhi; + accuracy(1, i, m) = myCorr (phi, phiHat); + + % adaptive sparse-estimator + % ----------------------------------------------------------------- + % one aditional parameter for the exponent + dims.n_phi = K + 1; + % priors set to l1-norm + optionsAdapt = options; + optionsAdapt.priors.muPhi = [zeros(K, 1); log(2)]; + optionsAdapt.priors.SigmaPhi = diag([ones(K, 1); 0.2]); + % estimation + [p3, o3] = VBA_NLStateSpaceModel (y, [], [], @g_GLMsparseAdapt, dims, optionsAdapt); + % get estimated exponent + exponent(i, m) = exp (p3.muPhi(K + 1)); + % recover transformed parameters + phiHat = VBA_sparsifyPrior (p3.muPhi(1 : end - 1), log (exponent(i, m))); + accuracy(3, i, m) = myCorr (phi, phiHat); + + + end +end + +% Display +% ========================================================================= +hf = VBA_figure ('name', 'demo_sparsityPrior: results'); + +% accuracy +ha = subplot(1,2,1,'parent',hf); +mr = mean(accuracy,3); +sr = std(accuracy,[],3) / sqrt (M); +hb = errorbar(ha,mr',sr'); +set(hb(1), 'Color', 'b') +set(hb(2), 'Color', 'r') +set(hb(3), 'Color', 'm') +legend(ha,{'L2-norm (Gaussian)','L1-norm (Laplace)','Adaptive sparsity'},'Location','northwest') +xlabel(ha,'sparsity of generative model') +ylabel(ha,'parameter estimation accuracy') +box off +% adaptive exponent +ha = subplot(1,2,2,'parent',hf); +mse = mean(exponent,2); +sse = std(exponent,[],2) / sqrt (M); +hb = errorbar(ha,mse,sse); +set(hb, 'Color', 'm'); +xlabel(ha,'sparsity of generative model') +ylabel(ha,'estimated sparsity exponent') +box off +end + +% Subfunctions +% ######################################################################### +function displaySparsifyTransform () + +hf = VBA_figure ('name', 'demo_sparsityPrior: sparsify mapping'); + 
+x = -4 : 0.01 : 4; +gridP = [- log(2), 0, log(2)]; +for i = 1 : length (gridP) + sx(i, :) = VBA_sparsifyPrior (x, gridP(i)); +end +ha = subplot (1, 2, 1, 'parent', hf); +hb = plot (ha, x, sx'); +set(hb(1), 'Color', 'g') +set(hb(2), 'Color', 'b') +set(hb(3), 'Color', 'r') +box off +xlabel ('original parameter') +ylabel ('transformed parameter') +legend (ha, ... + {'sparsity exponent = 1/2', 'sparsity exponent = 1 ( l2-norm )', 'sparsity exponent = 2 ( l1-norm )' }, ... + 'Location', 'northwest'); + + +ha = subplot (1, 2, 2, 'parent', hf); +for i = 1 : length (gridP) + ps = interp1 (sx(i, :), normpdf (x, 0, 1), x); + ps = ps / nansum (ps); + hb(i) = plot (ha, x, ps); + hold on; +end +set(hb(1), 'Color', 'g') +set(hb(2), 'Color', 'b') +set(hb(3), 'Color', 'r') +hold off +box off +xlabel ('transformed parameter') +ylabel ('prior density') + + + +end + +function r = myCorr (x, y) + tmp = corrcoef (x, y); + r = tmp(1, 2); +end + diff --git a/subfunctions/demo_2DChoices.m b/demos/3_behavioural/demo_2DChoices.m similarity index 90% rename from subfunctions/demo_2DChoices.m rename to demos/3_behavioural/demo_2DChoices.m index 9ad01ca7..d8b0459f 100755 --- a/subfunctions/demo_2DChoices.m +++ b/demos/3_behavioural/demo_2DChoices.m @@ -33,9 +33,9 @@ dim.p = 1; dim.n_t = ntrials; options.inG = in; -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation options.dim = dim; -[y,x,x0,eta,e] = simulateNLSS(ntrials,[],g_fname,[],phi,u,[],[],options); +[y,x,x0,eta,e] = VBA_simulate (ntrials,[],g_fname,[],phi,u,[],[],options); % graphical summary of choice data @@ -84,11 +84,11 @@ n1 = 1e2; % density of grid for R n2 = 5e1; % density of grid for t N = 4; % # 1D-DCT bsis functions -X = Fourier2DBF(n1,n2,N,0); +X = VBA_Fourier2DBF(n1,n2,N,0); inb.ind.x = in.ind.t; inb.ind.y = in.ind.R; -inb.gx = linspace(min(vec(R)),max(vec(R)),n1); -inb.gy = linspace(min(vec(t)),max(vec(t)),n2); +inb.gx = linspace(min(VBA_vec(R)),max(VBA_vec(R)),n1); +inb.gy = linspace(min(VBA_vec(t)),max(VBA_vec(t)),n2); inb.bf = X; g_fname = @g_2AFC_basis; dim = []; @@ -97,7 +97,7 @@ dim.n_theta = 0; options.priors.SigmaPhi = 1e0*eye(dim.n_phi); options.inG = inb; -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; options.DisplayWin = 0; options.verbose = 1; [p0,o0] = VBA_NLStateSpaceModel(y,u,[],g_fname,dim,options); @@ -123,7 +123,7 @@ Vu = zeros(size(X,1),size(X,2)); for i=1:length(inb.gx) for j=1:length(inb.gy) - Vu(i,j) = vec(X(i,j,:))'*p0.SigmaPhi*vec(X(i,j,:)); + Vu(i,j) = VBA_vec(X(i,j,:))'*p0.SigmaPhi*VBA_vec(X(i,j,:)); end end @@ -165,6 +165,6 @@ title(ha,'comp') xlabel(ha,'estimated utility') ylabel(ha,'simulated utility') -getSubplots +VBA_getSubplots (); diff --git a/subfunctions/demo_AVL_recog.m b/demos/3_behavioural/demo_AVL_recog.m similarity index 91% rename from subfunctions/demo_AVL_recog.m rename to demos/3_behavioural/demo_AVL_recog.m index b3ad3da4..b9f2e08c 100644 --- a/subfunctions/demo_AVL_recog.m +++ b/demos/3_behavioural/demo_AVL_recog.m @@ -25,7 +25,7 @@ % assume static cue-outcome association x2 = randn.*ones(1,n_t); - sx2 = sigm(x2); + sx2 = VBA_sigmoid(x2); sx2 = sx2(:)'; case 2 @@ -33,14 +33,14 @@ % sample the cue-outcome association according to AR(1) model f_fname = @f_AR; g_fname = @g_sigm; - [sx2,x2,x20,eta,e] = simulateNLSS(n_t,f_fname,g_fname,[],[],[],exp(-theta(2)),Inf,[],0); + [sx2,x2,x20,eta,e] = VBA_simulate (n_t,f_fname,g_fname,[],[],[],exp(-theta(2)),Inf,[],0); case 3 % First sample the association volatility 
according to AR(1) model f_fname = @f_AR; g_fname = @g_Id; - [x3,x3,x30,eta,e] = simulateNLSS(n_t,f_fname,g_fname,[],[],[],exp(-theta(2)),Inf,[],0); + [x3,x3,x30,eta,e] = VBA_simulate (n_t,f_fname,g_fname,[],[],[],exp(-theta(2)),Inf,[],0); % create prior variance structure for cue-outcome association ex3 = exp(-x3); for t=1:n_t @@ -50,7 +50,7 @@ % varying prior variance f_fname = @f_AR; g_fname = @g_sigm; - [sx2,x2,x20,eta,e] = simulateNLSS(n_t,f_fname,g_fname,[],[],[],1,Inf,opt,0); + [sx2,x2,x20,eta,e] = VBA_simulate (n_t,f_fname,g_fname,[],[],[],1,Inf,opt,0); end @@ -58,9 +58,7 @@ % Then sample outcome identity from binomial pdf: x1 = zeros(1,n_t); seed = 1e4*rand; -for t=1:n_t - [x1(t)] = sampleFromArbitraryP([sx2(t),1-sx2(t)]',[1,0]',1); -end +x1 = VBA_random ('Bernoulli', sx2); % Finally, sample visual outcome from GMM ... u = zeros(2,n_t); @@ -92,7 +90,7 @@ elseif options.inF.flag == 3 X0 = [0.5;0;1e1;0;1e1;0]; end -[RT,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,Inf,Inf,options,X0); +[RT,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,Inf,Inf,options,X0); hf = figure('color',[1 1 1]); ha = subplot(2,2,1,'parent',hf,'nextplot','add'); @@ -132,7 +130,7 @@ grid(ha,'on') axis(ha,'tight') title(ha,'simulated vs observed cue-outcome association') -getSubplots +VBA_getSubplots (); % Now OBSERVE THE OBSERVER: diff --git a/demos/3_behavioural/demo_BSL.m b/demos/3_behavioural/demo_BSL.m new file mode 100644 index 00000000..6b290e11 --- /dev/null +++ b/demos/3_behavioural/demo_BSL.m @@ -0,0 +1,121 @@ +function demo_BSL +% This script simulates and inverts a Bayesian sequence learner (BSL) + +% simulation parameters +K = 1; +options.inF = struct('K',K); +options.inG = struct('K',K); +dim.n = 2^(K+1); +dim.n_theta = 1; +dim.n_phi = 2; + + + +%% simulate sequence of BSL choices +x0 = 1*ones(dim.n,1); % log-odds of P(o=1) +theta = [-2]; % BSL's prior volatility +phi = [-1;0]; % (log-) inverse-temperature, bias +N = 150; % number of trials +p = 0.75; +P = repmat([p,1-p,1-p,p],1,N); % probabilistic repetition of [1 0 0 1] +% P = repmat(p,1,N); % probabilistic repetition of [1 0 0 1] +y = VBA_random ('Bernoulli', P(1 : N)); + +a = zeros(1,N); +gx = NaN(1,N); +x = zeros(dim.n,N); +x(:,1:K) = repmat(x0,1,K); %initialize hidden states +u(:,1:K) = NaN(K+1,K); +for i=K+2:N + u(:,i) = flipud(VBA_vec(y(i-K-1:i-1))); + if K==0 && i==1 % the issue only arises for 0-BSL (degenerated!) 
+ x(:,i) = f_BSL(x0,theta,u(:,i),options.inF); + else + x(:,i) = f_BSL(x(:,i-1),theta,u(:,i),options.inF); + end + gx(i) = g_BSL(x(:,i),phi,u(:,i),options.inG); + a(i) = VBA_random ('Bernoulli', gx(i)); +end +hf = figure('color',[1 1 1]); +ha = subplot(2,1,1,'parent',hf); +plot(ha,x') +ha = subplot(2,1,2,'parent',hf); +plot(ha,gx) +hold on, plot([1,N],[0.5,0.5],'color',0.2*[1 1 1]) +ic = find(a(1:N)==y(1:N)); +plot(ha,ic,a(ic),'g.') +ii = find(a(1:N)~=y(1:N)); +plot(ha,ii,a(ii),'r.') +title(ha,['perf=',num2str(100*length(ic)./N,3),'%']) + + +%% invert BSL model given sequence of agent's choices +options.skipf = zeros(1,N); +options.skipf(1:K+1) = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; +options.SigmaTheta = 1e2*eye(dim.n_theta); +f_fname = @f_BSL; +g_fname = @g_BSL; +[posterior,out] = VBA_NLStateSpaceModel(a,u,f_fname,g_fname,dim,options); + +%% Display results +displayResults(posterior,out,a,x,x0,theta,phi,[],[]) +hf = unwrapKBSL(posterior.muX,posterior.muPhi,u,options.inG); +end + +%% ######################################################################## +function hf = unwrapKBSL(x,phi,u,inG) +% display k-BSL's evolving beliefs over trials +% function unwrapKBSL(x,options) +% Bayesian Sequence Learners (BSL) essentially update their posterior +% belief about the transition probabilities of a sequence of outcomes. This +% function displayes BSL's belief as trials unfold. [See f_BSL.m] +% IN: +% - x: k-BSL's hidden-states +% - inG: the input structure to g_kBSL.m +% OUT: +% - hf: display figure handle + +K = inG.K; % sequence depth + +nt = size(x,2); % nb of trials + +hf = figure('color',[1 1 1],'name',[num2str(K),'-BSL learner']); + +% derive BSL's prediction about next outcome +gx = 0.5*ones(1,nt); +for t=K+1:nt + gx(t) = g_BSL(x(:,t),phi,u(:,t),inG); +end +ha = subplot(2,1,1,'parent',hf); +plot(ha,gx,'color','k','marker','.') +xlabel(ha,'time/trials') +ylabel(ha,'P(a=1)') +title(ha,'BSL''s next bet') +box(ha,'off') + +% BSL's learned belief about next outcome (given past sequence) +m0 = x(1:2^K,:); % E[log-odds] +V0 = exp(x((2^K)+1:2^(K+1),:)); % V[log-odds] +a = 0.368; +EP = VBA_sigmoid(m0./(1+a*sqrt(V0))); +VP = EP.*(1-EP).*(1-1./(1+a*sqrt(V0))); + +ha = subplot(2,1,2,'parent',hf); +plotUncertainTimeSeries(EP,VP,[],ha); +% plot(ha,EP','marker','.') +xlabel(ha,'time/trials') +ylabel(ha,'P(o=1|past o)') +title(ha,'BSL''s conditional belief about P(o|past o)') +box(ha,'off') + +if K>0 + leg = cell(2^K,1); + for k=0:2^K-1 + tmp = dec2bin(k); + ntmp = length(num2str(tmp)); + leg{k+1} = ['past o = ',cat(2,repmat('0',1,K-ntmp),tmp)]; + end + legend(ha,leg) +end +end diff --git a/demos/behavioural/demo_Qlearning.m b/demos/3_behavioural/demo_Qlearning.m similarity index 94% rename from demos/behavioural/demo_Qlearning.m rename to demos/3_behavioural/demo_Qlearning.m index 341a6a47..3d3d6e86 100644 --- a/demos/behavioural/demo_Qlearning.m +++ b/demos/3_behavioural/demo_Qlearning.m @@ -5,7 +5,6 @@ % Demo of Q-learning simulation and inference % % This is a simple example of reinforcement learning algorithm. -% This demo % % Background: % ~~~~~~~~~~~ @@ -47,7 +46,7 @@ % specify model % ========================================================================= f_fname = @f_Qlearning; % evolution function (Q-learning) -g_fname = @g_softmax; % observation function (softmax mapping) +g_fname = @g_QLearning; % observation function (softmax mapping) % provide dimensions dim = struct( ... 
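As a rough illustration of what the evolution/observation pair specified above computes, here is a hypothetical sketch under assumed conventions (names ending in _sketch, and the layout of the input u, are assumptions; the toolbox's f_Qlearning and g_QLearning may differ):

    % hypothetical sketch of a Q-learning evolution/observation pair
    function fx = f_Qlearning_sketch (x, theta, u, ~)
        % x: 2 Q-values; u(1): previous choice (0/1); u(2): previous feedback
        lr    = 1 ./ (1 + exp (- theta(1)));            % learning rate in [0,1]
        a     = u(1) + 1;                               % index of the chosen option
        fx    = x;
        fx(a) = x(a) + lr * (u(2) - x(a));              % Rescorla-Wagner update
    end
    function gx = g_Qlearning_sketch (x, phi, ~, ~)
        beta = exp (phi(1));                            % inverse temperature
        gx   = 1 ./ (1 + exp (- beta * (x(2) - x(1)))); % P(choosing option 2)
    end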
@@ -60,14 +59,14 @@ % ------------------------------------------------------------------------- % use the default priors except for the initial state options.priors.muX0 = [0.5; 0.5]; -options.priors.SimaX0 = 0.1 * eye(2); +options.priors.SigmaX0 = 0.1 * eye(2); % options for the simulation % ------------------------------------------------------------------------- % number of trials n_t = numel(choices); % fitting binary data -options.binomial = 1; +options.sources.type = 1; % Normally, the expected first observation is g(x1), ie. after % a first iteratition x1 = f(x0, u0). The skipf flag will prevent this evolution % and thus set x1 = x0 @@ -81,7 +80,7 @@ % ------------------------------------------------------------------------- fprintf('=============================================================\n'); fprintf('\nEstimated parameters: \n'); -fprintf(' - learning rate: %3.2f\n', sigm(posterior.muTheta)); +fprintf(' - learning rate: %3.2f\n', VBA_sigmoid(posterior.muTheta)); fprintf(' - inverse temp.: %3.2f\n\n', exp(posterior.muPhi)); fprintf('=============================================================\n'); diff --git a/demos/3_behavioural/demo_QlearningAsymetric.m b/demos/3_behavioural/demo_QlearningAsymetric.m new file mode 100644 index 00000000..7db8bf69 --- /dev/null +++ b/demos/3_behavioural/demo_QlearningAsymetric.m @@ -0,0 +1,236 @@ +function [posterior, out] = demo_QlearningAsymetric (data) +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_QlearningAsymetric ([data]) +% Demo of Q-learning with asymetric learning rates for positive and +% negative predictions errors. +% +% This demo implements the experiment described in Frank et al. 2004, Science +% +% If no inputs are given, the demo will generate artificial data for both +% the learning and the test blocs and invert all data at once. +% +% IN: +% - [data]: +% - cues: 2 x T vector indicating the identity of the two cues +% presented at each trial +% - choices: 1 X T binary vector indicating if the subject chose the +% first (choice = 0) or the second (choice = 1) cue as encoded +% in data.cues +% - feedbacks: 1 X T vector describing the outcome of the choice. If +% a trial has no feedback (e.g. in test bloc), set value to NaN. +% +% OUT: +% - posterior, out: results of the inversion +% +% ///////////////////////////////////////////////////////////////////////// + +Nbandits = 3; + +% check inputs +% ========================================================================= + +switch nargin + case 0 + fprintf ('No inputs provided, generating simulated behavior...\n\n'); + data = simulateQlearningAsym (); + case 2 + fprintf ('Performing inversion of provided behaviour...\n\n'); + otherwise + error ('*** Wrong number of arguments.') +end + +% reformat data +% ========================================================================= +% observations +y = data.choices; + +% inputs +u = [ nan, data.choices ; % previous choice + nan, data.feedbacks ; % previous feedback + nan(2, 1), data.cues ; % previous pair + data.cues, nan(2, 1) ]; % identity of the presented cues + +% specify model +% ========================================================================= +f_fname = @f_QlearningAsym; % evolution function (Q-learning) +g_fname = @g_QLearning; % observation function (softmax mapping) + +% provide dimensions +dim = struct( ... + 'n', 2 * Nbandits, ... number of hidden states (2*N Q-values) + 'n_theta', 2, ... 
number of evolution parameters (1: learning rate, 2: valence effect) + 'n_phi', 1 ... number of observation parameters (1: temperature) + ); + +% options for the simulation +% ------------------------------------------------------------------------- +% use the default priors except for the initial state +options.priors.muX0 = 0.5 * ones (dim.n, 1); +options.priors.SigmaX0 = 0.01 * eye (dim.n); + +%options.priors.SigmaTheta = diag([0.1 0.1]); + +options.priors.muPhi = log(2.5); +options.priors.SigmaPhi = 1; + +% options for the simulation +% ------------------------------------------------------------------------- +% number of trials +n_t = numel(data.choices); +% fitting binary data +options.sources.type = 1; +options.verbose = false; + +% invert model +% ========================================================================= +[posterior, out] = VBA_NLStateSpaceModel(y, u, f_fname, g_fname, dim, options); + +% display estimated parameters: +% ------------------------------------------------------------------------- +fprintf('=============================================================\n'); +fprintf('\nEstimated parameters: \n'); +fprintf(' - positive learning rate: %3.2f\n', VBA_sigmoid(posterior.muTheta(1)+posterior.muTheta(2))); +fprintf(' - negative learning rate: %3.2f\n', VBA_sigmoid(posterior.muTheta(1)-posterior.muTheta(2))); +fprintf(' - inverse temp.: %3.2f\n\n', exp(posterior.muPhi)); +fprintf('=============================================================\n'); + +% invert model +% ========================================================================= +if exist('simulation','var') % used simulated data from demo_QlearningSimulation + displayResults( ... + posterior, ... + out, ... + choices, ... + simulation.state, ... + simulation.initial, ... + simulation.evolution, ... + simulation.observation, ... + Inf, Inf ... + ); +end + +% Recover predictions errors +% ========================================================================= +% get cue values +Qvalues = posterior.muX; +% trick: set learning rate to 1, no asymmetry, so that x(t+1) - x(t) = PE +theta = [Inf; 0]; +% for each trial, recompute the state evolution +for t = 1 : n_t + PE(t) = sum (f_QlearningAsym (Qvalues(:,t), theta, u(:,t+1), struct) - Qvalues(:,t)); +end + +end + +function data = simulateQlearningAsym () + +% training bloc +% ------------------------------------------------------------------------- + +nTrials = 120; + +% define pairs +cues = [repmat([1; 2], 1, nTrials) ... % A B + repmat([3; 4], 1, nTrials) ... % C D + repmat([5; 6], 1, nTrials) ]; % E F + +% define contingencies for each pair +contingencies = [zeros(1, 0.8 * nTrials), ones(1, 0.2 * nTrials) ... + zeros(1, 0.7 * nTrials), ones(1, 0.3 * nTrials) ... + zeros(1, 0.6 * nTrials), ones(1, 0.4 * nTrials) ]; + +% shuffle +p = randperm (numel (contingencies)) ; +cues = cues(:, p); +contingencies = contingencies(p); + +% testing bloc +% ------------------------------------------------------------------------- + +test = [1 1 1 1 2 2 2 2; % choose A and avoid B + 3 4 5 6 3 4 5 6]; + +test = repmat(test,1,0); + +cues = [cues test]; +contingencies = [contingencies nan(1, size(test,2))]; + +% create feedback structure for the simulation with VBA +% ------------------------------------------------------------------------- +% feedback function. Return 1 if action follow contingencies, nan if no feedback. +h_feedback = @(yt,t,in) (yt == contingencies(t)) * sign(1+contingencies(t)); +% feedback structure for the VBA +fb = struct( ... 
+ 'h_fname', h_feedback, ... % feedback function + 'indy', 1, ... % where to store simulated choice + 'indfb', 2, ... % where to store simulated feedback + 'inH', struct() ... + ); + +% Simulate choices for the given feedback rule +% ========================================================================= + +% define parameteters of the simulated agent +% ------------------------------------------------------------------------- +% learning rate +theta = [0.6 0.3]; % learning, asymmetry +% inverse temperature +phi = log(2.5); % will be exp transformed +% initial state +x0 = 0.5 * ones(6,1); + +% options for the simulation +% ------------------------------------------------------------------------- +% number of trials +n_t = numel(contingencies); +% fitting binary data +options.sources.type = 1; +options.verbose = false; + +% simulate choices +% ------------------------------------------------------------------------- + +u = [nan(2, n_t + 1); + nan(2, 1), cues ; + cues, nan(2, 1)]; + +[y,x,x0,eta,e,u] = VBA_simulate ( ... + n_t, ... number of trials + @f_QlearningAsym, ... evolution function + @g_QLearning, ... observation function + theta, ... evolution parameters (learning rate) + phi, ... observation parameters, + u, ... dummy inputs + Inf, Inf, ... deterministic evolution and observation + options, ... options + x0, ... initial state + fb ... feedback rule + ); + + +% Return simulated choices, feedbacks, and parameters used for the +% simulation +% ========================================================================= +data.choices = y; +data.feedbacks = u(2, 2 : end); +data.cues = cues; + +% Display stat of simulated behaviour +% ========================================================================= +testT = numel(y) - size(test, 2) : numel(y); +testY = y(testT); +testU = u(:,testT); +chooseA = mean(testY(testU(3,:) == 1) == 0); +avoidB = mean(testY(testU(3,:) == 2) == 1); + +fprintf('=============================================================\n'); +fprintf('Simulated choice with asymetric learning: %03.2f\n',theta(2)); + +fprintf(' - Choose A: %03.2g%%\n',chooseA); +fprintf(' - Avoid B: %03.2g%%\n',avoidB); +fprintf('=============================================================\n'); + +end + + diff --git a/demos/behavioural/demo_QlearningSimulation.m b/demos/3_behavioural/demo_QlearningSimulation.m similarity index 94% rename from demos/behavioural/demo_QlearningSimulation.m rename to demos/3_behavioural/demo_QlearningSimulation.m index defb7d90..0a11adf2 100644 --- a/demos/behavioural/demo_QlearningSimulation.m +++ b/demos/3_behavioural/demo_QlearningSimulation.m @@ -15,9 +15,8 @@ % % ///////////////////////////////////////////////////////////////////////// - f_fname = @f_Qlearning; % evolution function (Q-learning) -g_fname = @g_softmax; % observation function (softmax mapping) +g_fname = @g_QLearning; % observation function (softmax mapping) % Create the feedback rule for the simulation % ========================================================================= @@ -25,7 +24,7 @@ % define which action should be rewarded at each trial (contingencies) % ------------------------------------------------------------------------- % probability of a positive reward following a 'correct' action -probRewardGood = 75/100; + probRewardGood = 75/100; % draw 25 random feedbacks contBloc = +(rand(1,25) < probRewardGood); % create 6 blocs with reversals @@ -51,7 +50,7 @@ % define parameteters of the simulated agent % ------------------------------------------------------------------------- 
% learning rate -theta = sigm(0.65,struct('INV',1)); % 0.65, once sigm transformed +theta = VBA_sigmoid(0.65,'inverse',true); % 0.65, once sigm transformed % inverse temperature phi = log(2.5); % will be exp transformed % initial state @@ -62,7 +61,7 @@ % number of trials n_t = numel(contingencies); % fitting binary data -options.binomial = 1; +options.sources.type = 1; % Normally, the expected first observation (choice) is g(x1), ie. after % a first iteratition x1 = f(x0). The skipf flag will prevent this evolution % and thus set x1 = x0 @@ -70,7 +69,7 @@ % simulate choices % ------------------------------------------------------------------------- -[y,x,x0,eta,e,u] = simulateNLSS( ... +[y,x,x0,eta,e,u] = VBA_simulate ( ... n_t+1, ... number of trials f_fname, ... evolution function g_fname, ... observation function diff --git a/subfunctions/demo_ToMgames.m b/demos/3_behavioural/demo_ToMgames.m similarity index 100% rename from subfunctions/demo_ToMgames.m rename to demos/3_behavioural/demo_ToMgames.m diff --git a/subfunctions/demo_VBfree.m b/demos/3_behavioural/demo_VBfree.m similarity index 92% rename from subfunctions/demo_VBfree.m rename to demos/3_behavioural/demo_VBfree.m index 09a17ad1..087e8d6a 100644 --- a/subfunctions/demo_VBfree.m +++ b/demos/3_behavioural/demo_VBfree.m @@ -54,11 +54,11 @@ priors.b_alpha = 0; options.priors = priors; -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; options.skipf = zeros(1,n_t); options.skipf(1) = 1; % apply identity mapping from x0 to x1. -[y,x,x0,eta,e,u] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,Inf,Inf,options,x0,fb); +[y,x,x0,eta,e,u] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,Inf,Inf,options,x0,fb); figure plot(y-e,'r') @@ -71,7 +71,7 @@ subplot(2,2,i),plot(x([i,i+4],:)'),title(ti{i}) end drawnow -getSubplots +VBA_getSubplots (); [posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); diff --git a/subfunctions/demo_asymRW.m b/demos/3_behavioural/demo_asymRW.m similarity index 91% rename from subfunctions/demo_asymRW.m rename to demos/3_behavioural/demo_asymRW.m index a477830d..e9518c2d 100644 --- a/subfunctions/demo_asymRW.m +++ b/demos/3_behavioural/demo_asymRW.m @@ -11,7 +11,8 @@ g_fname = @g_goNogo; % -> 'go' choice probability % simu parameters -feedbacks = {-1;0;1}; % feedbacks: negative, neutral, positive +feedbacks = [-1; 0; 1]; % feedbacks: negative, neutral, positive +contingencies = [2; 1; 2] / 5; %probability of feedbacks truemodel = 1; % index of true model (which generates the data) theta = [2;0;1;0]; phi = [-1]; % value of the 'no-go' option @@ -30,7 +31,7 @@ opt{1}.inF.inda = 2; % last choice opt{1}.priors.a_alpha = Inf; % deterministic system opt{1}.priors.b_alpha = 0; % [id] -opt{1}.binomial = 1; % binary (go/nogo) choices +opt{1}.sources.type = 1; % binary (go/nogo) choices opt{1}.skipf = zeros(1,n_t); opt{1}.skipf(1) = 1; % apply identity mapping from x0 to x1. 
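The re-parameterisations used in demo_QlearningSimulation above, and in most of these behavioural demos (a learning rate estimated through its inverse-sigmoid, an inverse temperature estimated through its log), can be spelled out in plain MATLAB:

    invsigm = @(p) log (p ./ (1 - p));   % logit, i.e. the inverse sigmoid
    theta   = invsigm (0.65);            % parameter actually estimated by VBA
    lr      = 1 ./ (1 + exp (- theta));  % maps back to the 0.65 learning rate
    phi     = log (2.5);                 % log inverse temperature
    beta    = exp (phi);                 % maps back to the 2.5 inverse temperature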
@@ -59,20 +60,14 @@ opt{4}.inF.indAlpha = 2; % learning rates % sample random feedback sequence -u = zeros(1,n_t); -for t=1:n_t - [X] = sampleFromArbitraryP([2;1;2]./5,[1:3]',1); - u(t) = feedbacks{X}; -end - - +u = VBA_random ('Arbitrary', contingencies, feedbacks, 1, n_t); % simulate agent's response given predefined feedback sequence fb.h_fname = @h_goNogo; fb.indy = 2; fb.indfb = 1; fb.inH.u = u; -[y_choice,x,x0,eta,ee,uu] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,[0;0],Inf,[],opt{truemodel},x0,fb); +[y_choice,x,x0,eta,ee,uu] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,[0;0],Inf,[],opt{truemodel},x0,fb); y_value = x + randn(size(x))./sqrt(sigma); U = zeros(3,n_t); @@ -116,17 +111,17 @@ opt{i}.figName = opt{i}.inF.model; opt{i}.DisplayWin = 1; str{i} = opt{i}.figName; - opt{i}.binomial = 1; + opt{i}.sources.type = 1; [p{i,1},o{i,1}] = VBA_NLStateSpaceModel(y_choice,uu,f_fname,g_fname,d{i},opt{i}); F(i,1) = o{i,1}.F; % invert models on value data - opt{i}.binomial = 0; + opt{i}.sources.type = 0; [p{i,2},o{i,2}] = VBA_NLStateSpaceModel(y_value,uu,f_fname,@g_Id,d{i},opt{i}); F(i,2) = o{i,2}.F; end -pm(:,1) = softmax(F(:,1)); -pm(:,2) = softmax(F(:,2)); +pm(:,1) = VBA_softmax(F(:,1)); +pm(:,2) = VBA_softmax(F(:,2)); pf1(:,1) = [pm(1,1)+pm(3,1);pm(2,1)+pm(4,1)]; pf1(:,2) = [pm(1,2)+pm(3,2);pm(2,2)+pm(4,2)]; pf2(:,1) = [pm(2,1)+pm(3,1);pm(1,1)+pm(4,1)]; diff --git a/subfunctions/demo_discounting.m b/demos/3_behavioural/demo_discounting.m similarity index 93% rename from subfunctions/demo_discounting.m rename to demos/3_behavioural/demo_discounting.m index 5199362a..59b39a85 100644 --- a/subfunctions/demo_discounting.m +++ b/demos/3_behavioural/demo_discounting.m @@ -32,7 +32,7 @@ dim.n_t = ntrials; options.inG = in; options.dim = dim; -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; options.DisplayWin = 0; % Build time series of hidden states and observations @@ -49,7 +49,7 @@ u(in.ind.R,:) = R; for i=1:nm options.inG.model = models{i}; - [y,x,x0,eta,e] = simulateNLSS(ntrials,[],g_fname,[],phi,u,[],[],options); + [y,x,x0,eta,e] = VBA_simulate (ntrials,[],g_fname,[],phi,u,[],[],options); % Call inversion routine for j=1:nm options.inG.model = models{j}; diff --git a/subfunctions/demo_dynLearningRate.m b/demos/3_behavioural/demo_dynLearningRate.m similarity index 91% rename from subfunctions/demo_dynLearningRate.m rename to demos/3_behavioural/demo_dynLearningRate.m index 2d7b98c5..d071c9df 100644 --- a/subfunctions/demo_dynLearningRate.m +++ b/demos/3_behavioural/demo_dynLearningRate.m @@ -27,12 +27,12 @@ inF.thub = 1; inF.rf = 1; inG.respmod = 'taylor'; -options.binomial = 1; +options.sources.type = 1 ; % binomial observation; options.inF = inF; options.inG = inG; options.skipf = zeros(1,nt); options.skipf(1) = 1; -[y,x,x0,eta,e,u] = simulateNLSS(nt,@f_OpLearn,@g_VBvolatile0,theta,phi,zeros(2,nt),Inf,Inf,options,x0,fb); +[y,x,x0,eta,e,u] = VBA_simulate (nt,@f_OpLearn,@g_VBvolatile0,theta,phi,zeros(2,nt),Inf,Inf,options,x0,fb); % plot simulated behaviour hf = figure('color',[1 1 1 ],'name','simulated choices'); ha = axes('parent',hf); @@ -40,7 +40,7 @@ hold(ha,'on') plot(ha,y,'kx') legend(ha,{'p(y=1|theta,phi,m)','binomial data samples'}) -getSubplots +VBA_getSubplots (); dummy.options = options; [ha,hf] = unwrapVBvolatileOTO(struct('muX',x,'muTheta',theta),dummy); set(hf,'name','simulated volatile VB learner') @@ -78,7 +78,7 @@ opt0 = []; opt0.backwardLag = 32; opt0.priors = priors; -opt0.binomial = 1; +opt0.sources.type = 1; opt0.verbose 
= 1; opt0.MaxIter = 3; opt0.kernelSize = 32; @@ -88,8 +88,8 @@ % check relation between identified learning rate and volatility of VB-learner: it = 1:400; -X = [vec(p0.muX(3,:)),ones(nt,1)]; -Y = vec(sum(x([4,9],:),1)); +X = [VBA_vec(p0.muX(3,:)),ones(nt,1)]; +Y = VBA_vec(sum(x([4,9],:),1)); [pv,stat,df,all] = GLM_contrast(X,Y,[1;0],'F',1,{'learning rate','CST'},{'volatility'}); @@ -114,7 +114,7 @@ end o0_v = o0; o0_v.u = uu; -[kernels] = VBA_VolterraKernels(p0,o0_v); +[kernels] = VBA_getVolterraKernels(p0,o0_v); hf = figure('color',[1 1 1],'name','Volterra decomposition'); mk = squeeze(kernels.x.m(3,:,:))'; vk = squeeze(kernels.x.v(3,:,:))'; @@ -135,7 +135,7 @@ priors.a_alpha = Inf; priors.b_alpha = 0; opt1.priors = priors; -opt1.binomial = 1; +opt1.sources.type = 1; opt1.figName = 'augmented Q-learning model'; [p1,o1] = VBA_NLStateSpaceModel(y,u,@f_Qlearn_gammaLR,@g_softmax,d1,opt1); @@ -143,7 +143,7 @@ % extract and plot Volterra kernels o1_v = o1; o1_v.u = uu; -[kernels] = VBA_VolterraKernels(p1,o1_v); +[kernels] = VBA_getVolterraKernels(p1,o1_v); mk = squeeze(kernels.x.m(3,:,:))'; vk = squeeze(kernels.x.v(3,:,:))'; ha = subplot(2,1,2,'parent',hf); diff --git a/subfunctions/demo_influenceLearning.m b/demos/3_behavioural/demo_influenceLearning.m similarity index 87% rename from subfunctions/demo_influenceLearning.m rename to demos/3_behavioural/demo_influenceLearning.m index d22cb419..e09a555a 100644 --- a/subfunctions/demo_influenceLearning.m +++ b/demos/3_behavioural/demo_influenceLearning.m @@ -30,7 +30,7 @@ theta = [1;1;0]; % weight (PE1), weight (PE2), opponent's temp phi = [-1;0]; % (log-) temperature, bias N = 50; % number of trials -o = VBA_bernoulli(.75,N)'; % opponent's choices (here dummy binomial sampling) +o = VBA_random ('Bernoulli', 0.75, 1, N); % opponent's choices (here dummy binomial sampling) a = NaN(1,N); gx = NaN(1,N); x = zeros(dim.n,N+1); @@ -45,12 +45,12 @@ %% invert "influence learning" model given sequence of agent's choices options.skipf = zeros(1,N); options.skipf(1) = 1; -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; options.priors.SigmaTheta = 1e2*eye(dim.n_theta); f_fname = @f_Hampton; g_fname = @g_Hampton; u = [zeros(2,1),[o;a]]; -[posterior,out] = VBA_hyperparameters(a,u,f_fname,g_fname,dim,options); +[posterior,out] = VBA_NLStateSpaceModel(a,u,f_fname,g_fname,dim,options); diff --git a/subfunctions/demo_recur.m b/demos/3_behavioural/demo_recur.m similarity index 93% rename from subfunctions/demo_recur.m rename to demos/3_behavioural/demo_recur.m index 974eb060..0ac4e283 100644 --- a/subfunctions/demo_recur.m +++ b/demos/3_behavioural/demo_recur.m @@ -31,7 +31,7 @@ theta = [theta;-2]; end N = 50; % number of trials -o = VBA_bernoulli(.85,N)'; % opponent's choices (here dummy binomial sampling) +o = VBA_random ('Bernoulli', 0.85, 1, N); % opponent's choices (here dummy binomial sampling) tic a = NaN(1,N); % agent's choices gx = NaN(1,N); @@ -69,7 +69,7 @@ %% invert k-ToM model options.skipf = zeros(1,N); options.skipf(1) = 1; -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; options.DisplayWin = 1; options.priors.SigmaTheta = 1e2*eye(dim.n_theta); % relax evol param options.priors.SigmaX0 = 0*eye(dim.n); diff --git a/subfunctions/demo_volatileVB.m b/demos/3_behavioural/demo_volatileVB.m similarity index 90% rename from subfunctions/demo_volatileVB.m rename to demos/3_behavioural/demo_volatileVB.m index d9050e42..c202cce2 100755 --- 
a/subfunctions/demo_volatileVB.m +++ b/demos/3_behavioural/demo_volatileVB.m @@ -33,19 +33,19 @@ inG.respmod = 'fixedForm'; % or 'taylor'; x0 = repmat([0;0;0;0;0],2,1); u = zeros(2,size(fb.inH.u0,2)+1); -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; options.inF = inF; options.inG = inG; options.skipf = zeros(1,length(u)); options.skipf(1) = 1; % apply identity mapping from x0 to x1. -[y,x,x0,eta,e,u] = simulateNLSS(length(u),f_fname,g_fname,theta,phi,u,Inf,Inf,options,x0,fb); +[y,x,x0,eta,e,u] = VBA_simulate (length(u),f_fname,g_fname,theta,phi,u,Inf,Inf,options,x0,fb); figure plot(y-e,'r') hold on plot(y,'kx') legend({'p(y=1|theta,phi,m)','binomial data samples'}) -getSubplots +VBA_getSubplots (); % pause dummy.options = options; diff --git a/subfunctions/demo_wsls.m b/demos/3_behavioural/demo_wsls.m similarity index 92% rename from subfunctions/demo_wsls.m rename to demos/3_behavioural/demo_wsls.m index f17433b5..f6a4925e 100644 --- a/subfunctions/demo_wsls.m +++ b/demos/3_behavioural/demo_wsls.m @@ -33,7 +33,7 @@ priors.a_alpha = Inf; priors.b_alpha = 0; options.priors = priors; -options.binomial = 1; +options.sources.type = 1; % one binomial observation; options.verbose = 0; tau = 8; @@ -56,7 +56,7 @@ options.skipf = zeros(1,n_t); options.skipf(1) = 1; % apply identity mapping from x0 to x1. - [y,x,x0,eta,e,u] = simulateNLSS(n_t,f_fname,g_fname,[],phi,u,Inf,Inf,options,x0,fb); + [y,x,x0,eta,e,u] = VBA_simulate (n_t,f_fname,g_fname,[],phi,u,Inf,Inf,options,x0,fb); % % figure % plot(y-e,'r') @@ -65,7 +65,7 @@ % legend({'p(y=1|theta,phi,m)','binomial data samples'}) % [p0,o0] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); % displayResults(p0,o0,y,x,x0,[],phi,Inf,Inf) -% getSubplots +% VBA_getSubplots (); % pause @@ -94,7 +94,7 @@ d.n_phi = tau*nu +1; opt.priors.muPhi = zeros(d.n_phi,1); opt.priors.SigmaPhi = 1e1*eye(d.n_phi); - opt.binomial = 1; + opt.sources.type = 1; opt.DisplayWin = 0; opt.verbose = 0; [posterior,out] = VBA_NLStateSpaceModel(y',[],[],@g_convSig,d,opt); diff --git a/subfunctions/compHRFs.m b/demos/4_neural/compHRFs.m similarity index 100% rename from subfunctions/compHRFs.m rename to demos/4_neural/compHRFs.m diff --git a/subfunctions/demo_2DneuralField.m b/demos/4_neural/demo_2DneuralField.m similarity index 93% rename from subfunctions/demo_2DneuralField.m rename to demos/4_neural/demo_2DneuralField.m index 606258da..a06c942d 100644 --- a/subfunctions/demo_2DneuralField.m +++ b/demos/4_neural/demo_2DneuralField.m @@ -64,10 +64,10 @@ % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,priors.muX0); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,priors.muX0); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -83,7 +83,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... 
n_t,theta,phi,zeros(size(u)),alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_CaBBI_FHN.m b/demos/4_neural/demo_CaBBI_FHN.m similarity index 100% rename from subfunctions/demo_CaBBI_FHN.m rename to demos/4_neural/demo_CaBBI_FHN.m diff --git a/subfunctions/demo_CaBBI_QGIF.m b/demos/4_neural/demo_CaBBI_QGIF.m similarity index 100% rename from subfunctions/demo_CaBBI_QGIF.m rename to demos/4_neural/demo_CaBBI_QGIF.m diff --git a/subfunctions/demo_FHN.m b/demos/4_neural/demo_FHN.m similarity index 93% rename from subfunctions/demo_FHN.m rename to demos/4_neural/demo_FHN.m index 2f5994fa..5142356a 100644 --- a/subfunctions/demo_FHN.m +++ b/demos/4_neural/demo_FHN.m @@ -36,10 +36,10 @@ theta = [0.4;0.1*ones(3,1)]; % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); hf = figure('color',[1 1 1]); diff --git a/subfunctions/demo_HRF.m b/demos/4_neural/demo_HRF.m similarity index 97% rename from subfunctions/demo_HRF.m rename to demos/4_neural/demo_HRF.m index e4756b7c..96180937 100644 --- a/subfunctions/demo_HRF.m +++ b/demos/4_neural/demo_HRF.m @@ -66,7 +66,7 @@ dim.n = 4; % Simulate time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(... +[y,x,x0,eta,e] = VBA_simulate (... n_t,... f_fname,... g_fname,... @@ -77,7 +77,7 @@ sigma,... options,... priors.muX0); -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % Call inversion routine [posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); diff --git a/subfunctions/demo_HRF_distributed.m b/demos/4_neural/demo_HRF_distributed.m similarity index 91% rename from subfunctions/demo_HRF_distributed.m rename to demos/4_neural/demo_HRF_distributed.m index 3f594bd2..1b965318 100644 --- a/subfunctions/demo_HRF_distributed.m +++ b/demos/4_neural/demo_HRF_distributed.m @@ -61,10 +61,10 @@ % options.checkGrads = 1; % Simulate time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % Display simulated time series -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -81,7 +81,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(n_t,theta,phi,u,alpha,sigma,options,posterior,dim); + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') end diff --git a/subfunctions/demo_HRF_dummy.m b/demos/4_neural/demo_HRF_dummy.m similarity index 100% rename from subfunctions/demo_HRF_dummy.m rename to demos/4_neural/demo_HRF_dummy.m diff --git a/demos/4_neural/demo_HodgkinHuxley.m b/demos/4_neural/demo_HodgkinHuxley.m new file mode 100644 index 00000000..6110923a --- /dev/null +++ b/demos/4_neural/demo_HodgkinHuxley.m @@ -0,0 +1,114 @@ +function [posterior, out] = demo_HodgkinHuxley () +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [posterior, out] = demo_HodgkinHuxley () +% Demo of Q-learning simulation and inference +% +% Demo of the Hodgin-Huxley neuronal model. 
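For reference, the current-balance equation that such a model integrates is, in its textbook form (the toolbox's f_HH may use a rescaled parameterization):

$$
C \dot{V} = I(t) - \bar{g}_{Na} \, m^3 h \, (V - E_{Na}) - \bar{g}_{K} \, n^4 \, (V - E_{K}) - g_L (V - E_L)
$$

where each gating variable w in {m, n, h} follows $\dot{w} = \alpha_w(V)(1-w) - \beta_w(V) w$. The terms m^3 h and n^4 are the Na and K channel opening probabilities plotted by displayHH below.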
+% +% Background: +% ~~~~~~~~~~~ + +% Input current (u) are sent to a neuron that responds according to the +% Hodgkin-Huxley model. In brief, an action potential is generated when the +% membrane depolarization reaches a critical threshold. An AP +% approximately corresponds to a 80mV depolarization. +% This demo simulates the response of such a neuron, and then inverts the +% model. Emphasis is put on the identifiability of model parameters +% (e.g. K/Na conductances). +% +% ///////////////////////////////////////////////////////////////////////// + +% Basic settings for simulations +% ========================================================================= +% Sampling rate +delta_t = 1 / 10; % 10Hz +% Recording duration +n_t = 500; + +% number of integration timesteps between two observation +options.decim = 3; % smaller means faster but high risk of numerical errors + +% specify model +% ========================================================================= +% evolution function +% ------------------------------------------------------------------------- +f_fname = @f_HH; + +% stepsize of the numerical integration +options.inF.delta_t = delta_t / options.decim; + +% observation function +% ------------------------------------------------------------------------- +g_fname = @g_Id; + +% only membrane potential is measured +options.inG.ind = 1; + +% Simulation +% ========================================================================= +% input currents from random spikes +pSpike = 0.05; +spikeAmp = 100; +u = spikeAmp * VBA_random ('Bernoulli', pSpike, 1, n_t); + +% Parameters of the simulation +steadyState = [0; - 2.8; - 0.75; 0.4]; % asymptotic state with 0 input +theta = 0.1 * randn (4, 1); % evolution parameters, 0 = default HH params +phi = []; +alpha = Inf; +sigma = 1e2; + +% simulate +[y, x, x0, eta, e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,steadyState); + +% display +displayHH (u, x); + +% Inversion: parameter recovery +% ========================================================================= +% dimensions of the problem +dim.n = 4; +dim.n_theta = 4; +dim.n_phi = 0; + +% priors +options.priors.muX0 = steadyState; + +% estimation routine +[posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); + +% display +displayResults(posterior,out,y,x,x0,theta,phi,alpha,sigma); + +end + +%% subfunctions +% ######################################################################### +function displayHH (u, x) + +hf = figure('color',[1 1 1]); +ha = subplot(3,1,1,'parent',hf,'nextplot','add'); +plot(ha,u') +title(ha,'input current') +xlabel('time') +ha = subplot(3,1,2,'parent',hf,'nextplot','add'); +col = getColors(2); +m = VBA_sigmoid(x(2,:)); +n = VBA_sigmoid(x(3,:)); +h = VBA_sigmoid(x(4,:)); +p(1,:) = m.^3.*h; +p(2,:) = n.^4; +for i=1:2 + plot(ha,p(i,:),'color',col(i,:)); +end +title(ha,'ion channels opening probabilities') +xlabel('time') +legend({'Na','K'}) +ha = subplot(3,1,3,'parent',hf,'nextplot','add'); +plot(ha,x(1,:)) +title(ha,'output membrane depolarization (mV)') +xlabel('time') +end + + diff --git a/subfunctions/demo_PSP.m b/demos/4_neural/demo_PSP.m similarity index 76% rename from subfunctions/demo_PSP.m rename to demos/4_neural/demo_PSP.m index dd5ad32d..b6ca3683 100644 --- a/subfunctions/demo_PSP.m +++ b/demos/4_neural/demo_PSP.m @@ -24,9 +24,9 @@ x0 = [0;0]; -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,Inf,Inf,options,x0); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,Inf,Inf,options,x0); 
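The random spike-train input used in the Hodgkin-Huxley demo above can also be generated with base MATLAB, as a sanity check on the VBA_random call (which, as used throughout these demos, draws iid Bernoulli samples with the given success probability):

    pSpike   = 0.05;                                  % probability of a spike per sample
    spikeAmp = 100;                                   % input current amplitude
    n_t      = 500;                                   % number of samples
    u        = spikeAmp * (rand (1, n_t) < pSpike);   % equivalent Bernoulli draws
    fprintf ('%d input spikes over %d samples\n', sum (u > 0), n_t);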
-displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); diff --git a/subfunctions/demo_behaviouralDCM.m b/demos/4_neural/demo_behaviouralDCM.m similarity index 94% rename from subfunctions/demo_behaviouralDCM.m rename to demos/4_neural/demo_behaviouralDCM.m index 303f0680..f7a3a502 100644 --- a/subfunctions/demo_behaviouralDCM.m +++ b/demos/4_neural/demo_behaviouralDCM.m @@ -121,7 +121,7 @@ %--- Simulate time series of hidden states and observations disp('*** Simulation'); -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,zeros(dim.n,1)); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,zeros(dim.n,1)); f=figure('Color','w'); @@ -145,7 +145,7 @@ disp('*** Inversion'); options.priors = getPriors(nreg,n_t,options,1,0); -[options.priors.a_sigma, options.priors.b_sigma] = getHyperpriors(y(1:2,:),0.05,0.25) ; +[options.priors.a_sigma, options.priors.b_sigma] = VBA_guessHyperpriors(y(1:2,:),[0.05, 0.25]) ; [posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); set(0,'CurrentFigure',f); diff --git a/subfunctions/demo_dcm4fmri.m b/demos/4_neural/demo_dcm4fmri.m similarity index 95% rename from subfunctions/demo_dcm4fmri.m rename to demos/4_neural/demo_dcm4fmri.m index a21a330a..357d7ceb 100644 --- a/subfunctions/demo_dcm4fmri.m +++ b/demos/4_neural/demo_dcm4fmri.m @@ -104,8 +104,8 @@ %--- Simulate time series of hidden states and observations x = NaN; -while isweird(x) - [y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +while VBA_isWeird (x) + [y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); end % add in confounds @@ -150,7 +150,7 @@ %--- Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(n_t,theta,phi,u,alpha,sigma,options,posterior,dim); + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') end diff --git a/subfunctions/demo_dcm4fmri_distributed.m b/demos/4_neural/demo_dcm4fmri_distributed.m similarity index 96% rename from subfunctions/demo_dcm4fmri_distributed.m rename to demos/4_neural/demo_dcm4fmri_distributed.m index c0f7454a..c5fcae67 100644 --- a/subfunctions/demo_dcm4fmri_distributed.m +++ b/demos/4_neural/demo_dcm4fmri_distributed.m @@ -58,7 +58,7 @@ g_fname = @g_HRF_distributed; [options] = prepare_fullDCM(A,B,C,D,TR,microDT); options.inG.n_phi = 4; % number of spatial modes -options.inG.B = sigm(randn(8,options.inG.n_phi),struct('mat',1)); % spatial modes +options.inG.B = VBA_sigmoid(randn(8,options.inG.n_phi)); % spatial modes options.inG.ind_hrf = 1:2*nreg; options.inG.n_reg = nreg; for i=1:nreg @@ -188,10 +188,10 @@ phi = [ phi(:) ; p_w ]; % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -207,7 +207,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... 
n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_dcm_1region.m b/demos/4_neural/demo_dcm_1region.m similarity index 95% rename from subfunctions/demo_dcm_1region.m rename to demos/4_neural/demo_dcm_1region.m index 61d05497..22aa1a04 100644 --- a/subfunctions/demo_dcm_1region.m +++ b/demos/4_neural/demo_dcm_1region.m @@ -113,10 +113,10 @@ %--- Simulate time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -132,7 +132,7 @@ %--- Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_dcm_motorPremotor.m b/demos/4_neural/demo_dcm_motorPremotor.m similarity index 97% rename from subfunctions/demo_dcm_motorPremotor.m rename to demos/4_neural/demo_dcm_motorPremotor.m index eebb8f36..e8b9d618 100755 --- a/subfunctions/demo_dcm_motorPremotor.m +++ b/demos/4_neural/demo_dcm_motorPremotor.m @@ -93,7 +93,7 @@ %--- Simulate time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); displaySimulations(y,x,eta,e); figure,imagesc(u) figure,plot(x(options.inF.n5,:)') diff --git a/subfunctions/demo_dcmonline.m b/demos/4_neural/demo_dcmonline.m similarity index 89% rename from subfunctions/demo_dcmonline.m rename to demos/4_neural/demo_dcmonline.m index ce2e1a94..51a8908f 100644 --- a/subfunctions/demo_dcmonline.m +++ b/demos/4_neural/demo_dcmonline.m @@ -114,6 +114,7 @@ vb = zeros(1,nblocks); % on-line experiment + for tt=1:nblocks % 1- find best design given current information @@ -129,16 +130,26 @@ U(:,(tt-1)*n_t+1:tt*n_t) = u{ind}; fprintf(1,['Optimizing design (block ',num2str(tt),')... OK.']) fprintf(1,'\n') - plot(ha,tt,e(:,tt)','o') - legend(ha,{'u_{att} = off','u_{att} = on'}) + plot(ha,tt,e(1,tt)','ro') + plot(ha,tt,e(2,tt)','go') + legend(ha,{'u_{att} = off','u_{att} = on'},'Location','southeast','Orientation','horizontal') % 2- simulate BOLD response to chosen design under true model fprintf(1,['-- Simulating data (block ',num2str(tt),')... 
']) - [y,x,x0,eta,ee] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u{ind},alpha,sigma,o4design{truemodel},x0); + [y,x,x0,eta,ee] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u{ind},alpha,sigma,o4design{truemodel},x0); Y(:,(tt-1)*n_t+1:tt*n_t) = y; x0 = x(:,end); - plot(ha2,(tt-1)*n_t+1:tt*n_t,y') - legend(ha2,{'V1','V5'}) + + try + set(pl(1), 'XData', 1 : tt*n_t); + set(pl(1), 'YData', [get(pl(1), 'YData') y(1,:)]); + set(pl(2), 'XData', 1 : tt*n_t); + set(pl(2), 'YData', [get(pl(2), 'YData') y(2,:)]); + catch + pl(1) = plot(ha2,1:n_t,y(1,:),'m'); + pl(2) = plot(ha2,1:n_t,y(2,:),'b'); + end + legend(ha2,{'V1','V5'},'Location','southeast','Orientation','horizontal') fprintf(1,[' OK.']) fprintf(1,'\n') drawnow @@ -163,7 +174,7 @@ OUT(j,tt).out = out; OUT(j,tt).posterior = posterior; end - plot(ha3,tt,F(1,tt)-F(2,tt),'o') + plot(ha3,tt,F(1,tt)-F(2,tt),'k*') eb(tt) = OUT(1,tt).posterior.muTheta(OUT(1,tt).out.options.inF.indB{2}); vb(tt) = OUT(1,tt).posterior.SigmaTheta(OUT(1,tt).out.options.inF.indB{2},OUT(1,tt).out.options.inF.indB{2}); diff --git a/subfunctions/demo_fitzhugh.m b/demos/4_neural/demo_fitzhugh.m similarity index 94% rename from subfunctions/demo_fitzhugh.m rename to demos/4_neural/demo_fitzhugh.m index d82c2b92..910177ff 100644 --- a/subfunctions/demo_fitzhugh.m +++ b/demos/4_neural/demo_fitzhugh.m @@ -33,10 +33,10 @@ % Build time series of hidden states and observations x0 = [0;0.8]; -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause diff --git a/subfunctions/demo_lin2D.m b/demos/4_neural/demo_lin2D.m similarity index 85% rename from subfunctions/demo_lin2D.m rename to demos/4_neural/demo_lin2D.m index cfdf2fd6..3330fe12 100644 --- a/subfunctions/demo_lin2D.m +++ b/demos/4_neural/demo_lin2D.m @@ -4,7 +4,7 @@ clear all close all -nt = 5e2; +nt = 3e2; f_fname = @f_2d; g_fname = @g_Id; @@ -27,12 +27,12 @@ dim.p = 2; % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(nt,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); +[y,x,x0,eta,e] = VBA_simulate (nt,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); options.priors.a_alpha = 1; options.priors.b_alpha = 1; diff --git a/subfunctions/demo_micro2macro.m b/demos/4_neural/demo_micro2macro.m similarity index 97% rename from subfunctions/demo_micro2macro.m rename to demos/4_neural/demo_micro2macro.m index c3be2060..6bce2f25 100644 --- a/subfunctions/demo_micro2macro.m +++ b/demos/4_neural/demo_micro2macro.m @@ -24,7 +24,7 @@ dx = 2e-2; x = -0.5:dx:2; -sx = sigm(x); +sx = VBA_sigmoid(x); Ux = 2*sx-1; d2Udx2 = 2*sx.*(1-sx).*(1-2.*sx); @@ -88,7 +88,7 @@ for i=1:length(mu) for ii=1:N X = mu(i) + sqrt(v).*randn; - EU2(ii,i) = 2*sigm(X)-1; + EU2(ii,i) = 2*VBA_sigmoid(X)-1; end end EU2 = mean(EU2,1); @@ -99,5 +99,5 @@ plot(ha,Ux,EU2), title(ha,'E[U(x)] vs U(E[x])') -getSubplots +VBA_getSubplots (); diff --git a/subfunctions/demo_negfeedback.m b/demos/4_neural/demo_negfeedback.m similarity index 98% rename from subfunctions/demo_negfeedback.m rename to demos/4_neural/demo_negfeedback.m index 9a462382..b12fbfc1 100644 --- a/subfunctions/demo_negfeedback.m +++ b/demos/4_neural/demo_negfeedback.m @@ -145,7 +145,7 @@ % === 
Simulate time series of hidden states and observations disp('*** Simulation'); -[y,x,x0,eta,e,u] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e,u] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); [hf] = displaySimulations(y,x,eta,e) diff --git a/subfunctions/demo_stability_HRF.m b/demos/4_neural/demo_stability_HRF.m similarity index 96% rename from subfunctions/demo_stability_HRF.m rename to demos/4_neural/demo_stability_HRF.m index c5aba5f6..ecd5cdde 100644 --- a/subfunctions/demo_stability_HRF.m +++ b/demos/4_neural/demo_stability_HRF.m @@ -13,12 +13,12 @@ try - load phi.mat + load demo_stability_hrf.mat % get max eigenvalue P = [0;0;0;0;0;0]; for i=1:n for j=1:n - J = numericDiff(@f_HRF,1,[xg1(i);xg2(j);0;0],P,0,[]); + J = VBA_numericDiff(@f_HRF,1,[xg1(i);xg2(j);0;0],P,0,[]); lm(i,j) = max(real(eig(J))); end end @@ -62,7 +62,7 @@ P = [0;0;kaf(l);kas(k);0;0]; for i=1:n for j=1:n - J = numericDiff(@f_HRF,1,[xg1(i);xg2(j);0;0],P,0,[]); + J = VBA_numericDiff(@f_HRF,1,[xg1(i);xg2(j);0;0],P,0,[]); lm(i,j) = max(real(eig(J))); end end diff --git a/subfunctions/phi.mat b/demos/4_neural/demo_stability_hrf.mat similarity index 100% rename from subfunctions/phi.mat rename to demos/4_neural/demo_stability_hrf.mat diff --git a/classification/BMM/demo_BMM.m b/demos/5_classification/demo_BMM.m similarity index 100% rename from classification/BMM/demo_BMM.m rename to demos/5_classification/demo_BMM.m diff --git a/classification/CRP/demo_DP.m b/demos/5_classification/demo_DP.m similarity index 100% rename from classification/CRP/demo_DP.m rename to demos/5_classification/demo_DP.m diff --git a/classification/GMM/demo_GMM.m b/demos/5_classification/demo_GMM.m similarity index 100% rename from classification/GMM/demo_GMM.m rename to demos/5_classification/demo_GMM.m diff --git a/subfunctions/demo_Henon.m b/demos/5_classification/demo_Henon.m similarity index 78% rename from subfunctions/demo_Henon.m rename to demos/5_classification/demo_Henon.m index e75b3980..cc97efa6 100644 --- a/subfunctions/demo_Henon.m +++ b/demos/5_classification/demo_Henon.m @@ -13,12 +13,12 @@ theta = [1.4;0.3]; phi = []; inF.deltat = 1; -inG.G0 = 50; -inG.beta = 0.1; +inG.scale = 50; +inG.slope = 0.1; % Build options structure for temporal integration of SDE options.inF = inF; -options.inG = []; +options.inG = struct; % options.u0 = 0*ones(2,1); % initial condition: input value @@ -49,14 +49,19 @@ % Build time series of hidden states and observations -x = NaN; -while any(isweird(x(:))) - [y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +ok = false; +while ~ ok + try + [y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); + ok = true; + catch + fprintf('Simulation produced weird values, let''s try again.\n'); + end end % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -72,7 +77,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... 
n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_bin.m b/demos/5_classification/demo_bin.m similarity index 94% rename from subfunctions/demo_bin.m rename to demos/5_classification/demo_bin.m index a2f96eb2..ec0405a9 100644 --- a/subfunctions/demo_bin.m +++ b/demos/5_classification/demo_bin.m @@ -11,7 +11,7 @@ g_fname = @g_classif; -options.binomial = 1; +options.sources = struct('type',1 ,'out', 1); % one binomial observation; options.priors.muPhi = zeros(dim.n_phi,1); options.priors.SigmaPhi = 1e0*eye(dim.n_phi); options.isYout = zeros(dim.p,1); @@ -33,7 +33,7 @@ for i=1:2 % simulate data with and without real mapping phi = (2-i)*randn(dim.n_phi,1); - [y,x,x0,eta,e] = simulateNLSS(dim.n_t,[],g_fname,[],phi,[],[],[],options); + [y,x,x0,eta,e] = VBA_simulate (dim.n_t,[],g_fname,[],phi,[],[],[],options); g = y-e; g = g>0.5; % denoised data mner(i,ii) = sum(g.*y + (1-g).*(1-y))./dim.p; % max performance rate @@ -106,7 +106,7 @@ end -getSubplots +VBA_getSubplots (); diff --git a/subfunctions/demo_classification.m b/demos/5_classification/demo_classification.m similarity index 96% rename from subfunctions/demo_classification.m rename to demos/5_classification/demo_classification.m index da12903d..9eb59dcf 100644 --- a/subfunctions/demo_classification.m +++ b/demos/5_classification/demo_classification.m @@ -15,7 +15,7 @@ X = randn(n,p); % feature matrix b = 1+randn(p,1); % feature weights e = randn(n,1); % additional noise -y = +(sig(X*b+e)>0.5); +y = +(VBA_sigmoid(X*b+e)>0.5); % classify data using default set-up k = n; % number of folds (k=n: leave-one-out cross-validation) @@ -41,7 +41,7 @@ X = randn(n,p); % feature matrix b = ones(p,1); % feature weights e = randn(n,1); % additional noise - y = sig(X*b+e)>0.5; + y = +(VBA_sigmoid(X*b+e)>0.5); % apply classifier for j=1:length(v) options.priors.SigmaPhi = v(j).*eye(p); diff --git a/subfunctions/demo_2Dlin.m b/demos/6_physics/demo_2Dlin.m similarity index 89% rename from subfunctions/demo_2Dlin.m rename to demos/6_physics/demo_2Dlin.m index ab454735..029c9257 100644 --- a/subfunctions/demo_2Dlin.m +++ b/demos/6_physics/demo_2Dlin.m @@ -49,10 +49,10 @@ % options.checkGrads = 1; % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,priors.muX0); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,priors.muX0); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % Call inversion routine % [posterior,out] = VBA_onlineWrapper(y,u,f_fname,g_fname,dim,options); @@ -64,7 +64,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... 
n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_LV2D.m b/demos/6_physics/demo_LV2D.m similarity index 80% rename from subfunctions/demo_LV2D.m rename to demos/6_physics/demo_LV2D.m index f350cffd..2a15cfdb 100644 --- a/subfunctions/demo_LV2D.m +++ b/demos/6_physics/demo_LV2D.m @@ -40,15 +40,15 @@ % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); -if isweird(y) - disp('Warning: problem with simulated data: re-run the demo!') - return +ok = false; +while ~ ok + [y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); + ok = ~ VBA_isWeird ({x, y}); end % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % invert model @@ -61,7 +61,7 @@ % check predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(n_t,theta,phi,u,alpha,sigma,options,posterior,dim); + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') end diff --git a/subfunctions/demo_Lorenz.m b/demos/6_physics/demo_Lorenz.m similarity index 88% rename from subfunctions/demo_Lorenz.m rename to demos/6_physics/demo_Lorenz.m index d94de352..b405a3ae 100644 --- a/subfunctions/demo_Lorenz.m +++ b/demos/6_physics/demo_Lorenz.m @@ -6,7 +6,7 @@ close all % Choose basic settings for simulations -n_t = 5e2; +n_t = 3e2; deltat = 2e-2; alpha = 1e0/deltat; sigma = 1e-1; @@ -18,8 +18,9 @@ % Build options structure for temporal integration of SDE -inG.G0 = 50; -inG.beta = 0.2; +inG.scale = 50; +inG.slope = 0.2; + inF.deltat = deltat; options.inF = inF; options.inG = inG; @@ -51,10 +52,10 @@ options.checkGrads = 0; % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -70,7 +71,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... 
n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_Oscillatory.m b/demos/6_physics/demo_Oscillatory.m similarity index 89% rename from subfunctions/demo_Oscillatory.m rename to demos/6_physics/demo_Oscillatory.m index e05dacf1..8bacd4dd 100644 --- a/subfunctions/demo_Oscillatory.m +++ b/demos/6_physics/demo_Oscillatory.m @@ -17,8 +17,7 @@ inF.deltat = delta_t; inF.a = 0.1; inF.b = 0.9e-2; -inG.G0 = 2; -inG.beta = 1; +inG.scale = 2; inG.y0 = -1; inG.ind = 1; % only x(1) is partially observable options.inF = inF; @@ -56,10 +55,10 @@ % options.checkGrads = 1; % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -73,7 +72,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_Rossler.m b/demos/6_physics/demo_Rossler.m similarity index 81% rename from subfunctions/demo_Rossler.m rename to demos/6_physics/demo_Rossler.m index a9b72fca..9defd9b1 100644 --- a/subfunctions/demo_Rossler.m +++ b/demos/6_physics/demo_Rossler.m @@ -9,8 +9,8 @@ f_fname = @f_Rossler; g_fname = @g_sigmoid; u = []; -n_t = 5e2; -deltat = 4e-2; +n_t = 500; +deltat = 5e-2; alpha = 1e3; sigma = 1e3; theta = [0.2;0.2;2.791]; @@ -20,8 +20,8 @@ % Build options structure for temporal integration of SDE inF.deltat = deltat; options.inF = inF; +options.inG = struct; options.backwardLag = 4; -% options.checkGrads = 1; % Build priors for model inversion priors.muX0 = 0*[1;1;1]; @@ -42,10 +42,10 @@ % Build time series of hidden states and observations stop = 0; it = 1; -itmax = 10; +itmax = 15; while ~stop - [y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); - if (~isweird(y) && ~isweird(x)) || it >= itmax + [y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); + if ~ VBA_isWeird ({x, y}) || it >= itmax stop = 1; else it = it+1; @@ -53,14 +53,14 @@ end % display time series of hidden states and observations -displaySimulations(y,x,eta,e) -getSubplots +displaySimulations(y,x,eta,e); +VBA_getSubplots (); % disp('--paused--') % pause % Call inversion routine -if (~isweird(y) && ~isweird(x)) +if ~ VBA_isWeird ({x, y}) [posterior,out] = VBA_NLStateSpaceModel(y,u,f_fname,g_fname,dim,options); @@ -70,7 +70,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... 
n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_SHC.m b/demos/6_physics/demo_SHC.m similarity index 89% rename from subfunctions/demo_SHC.m rename to demos/6_physics/demo_SHC.m index e38a5630..5ee3e74f 100644 --- a/subfunctions/demo_SHC.m +++ b/demos/6_physics/demo_SHC.m @@ -52,10 +52,10 @@ % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -69,7 +69,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... n_t,theta,phi,zeros(size(u)),alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_VanDerPol.m b/demos/6_physics/demo_VanDerPol.m similarity index 88% rename from subfunctions/demo_VanDerPol.m rename to demos/6_physics/demo_VanDerPol.m index 0188d49a..0f5893de 100644 --- a/subfunctions/demo_VanDerPol.m +++ b/demos/6_physics/demo_VanDerPol.m @@ -17,8 +17,8 @@ u = []; % Build options structure for temporal integration of SDE -inG.G0 = 50; -inG.beta = 5; +inG.scale = 50; +inG.slope = 5; inF.deltat = deltat; options.inF = inF; options.inG = inG; @@ -44,12 +44,12 @@ % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -68,7 +68,7 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') diff --git a/subfunctions/demo_doubleWell.m b/demos/6_physics/demo_doubleWell.m similarity index 72% rename from subfunctions/demo_doubleWell.m rename to demos/6_physics/demo_doubleWell.m index e19fca04..4cc37aec 100644 --- a/subfunctions/demo_doubleWell.m +++ b/demos/6_physics/demo_doubleWell.m @@ -1,9 +1,8 @@ +function demo_doubleWell % Demo for Double Well system. % This demo inverts a model of chaotic double-Well system, which is % observed through a nonlinear sigmoid observation function. 
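% A minimal sketch of what a sigmoid observation function reading the
% renamed gain fields can look like: these demos now pass inG.scale and
% inG.slope (formerly inG.G0 and inG.beta), and the mapping itself goes
% through VBA_sigmoid. The name g_sigmoidSketch and the choice of observing
% x(1) only are illustrative assumptions, not the toolbox's own g_sigmoid:
function gx = g_sigmoidSketch (x, phi, u, inG)
    % scaled sigmoid readout of the first hidden state; gradients are left
    % out here, the inversion routine can approximate them numerically
    gx = VBA_sigmoid (x(1), 'scale', inG.scale, 'slope', inG.slope);
end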
-clear variables -close all % Choose basic settings for simulations f_fname = @f_doubleWell; @@ -14,12 +13,13 @@ alpha = 6e-1/deltat; sigma = 1e2; theta = [-2;3;1.5]; -phi = [1;1]; +phi = [1;0.5]; % Build options structure for temporal integration of SDE inG.G0 = 50; -inG.beta = 0.5; +inG.scale = 50; + inF.deltat = deltat; inF.a = -2; inF.b = 3; @@ -35,7 +35,7 @@ priors.muTheta(3) = 1.5; priors.SigmaTheta = 1e-1*eye(3); % priors.SigmaTheta(3,3) = 0; -priors.muPhi = 1*ones(length(phi),1); +priors.muPhi = [1; .5]; priors.SigmaPhi = 0e0*eye(2); priors.SigmaPhi(1,1) = 0; priors.a_alpha = 1e2; @@ -58,10 +58,10 @@ % Build time series of hidden states and observations -[y,x,x0,eta,e] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +[y,x,x0,eta,e] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); % display time series of hidden states and observations -displaySimulations(y,x,eta,e) +displaySimulations(y,x,eta,e); % disp('--paused--') % pause @@ -79,12 +79,37 @@ % Make predictions try options = out.options; - [xs,ys,xhat,vx,yhat,vy] = comparePredictions(... + [xs,ys,xhat,vx,yhat,vy] = VBA_comparePredictions(... n_t,theta,phi,u,alpha,sigma,options,posterior,dim); catch disp('------!!Unable to form predictions!!------') end +end + +%% ######################################################################## +function [] = show_potential(posterior) +% evaluates double well potential +theta1 = posterior.muTheta(1); +theta2 = posterior.muTheta(2); + + +x = -4:1e-3:6; +y = (x-theta2).^2.*(x-theta1).^2; + +hfig = findobj('tag','show'); + +if isempty(hfig) + hfig = figure('tag','show'); +else +% clf(hfig) +end + +figure(hfig) +ha = axes('parent',hfig); +plot(ha,x,y) + +end diff --git a/subfunctions/demo_Elogsig.m b/demos/7_mathematics/demo_Elogsig.m similarity index 50% rename from subfunctions/demo_Elogsig.m rename to demos/7_mathematics/demo_Elogsig.m index fa92ae8b..e60897bd 100644 --- a/subfunctions/demo_Elogsig.m +++ b/demos/7_mathematics/demo_Elogsig.m @@ -8,14 +8,14 @@ for j=1:length(gridV) V = gridV(j); X = m + sqrt(V).*randn(Nmc,1); - lsx1 = log(sig(X)); - lsx2 = log(1-sig(X)); + lsx1 = log(VBA_sigmoid(X)); + lsx2 = log(1-VBA_sigmoid(X)); Els1(i,j) = mean(lsx1); Els2(i,j) = mean(lsx2); - Els01(i,j) = Elogsig(m,V); - Els02(i,j) = Elogsig(-m,V); + Els01(i,j) = VBA_Elogsig(m,V); + Els02(i,j) = VBA_Elogsig(-m,V); end end -figure,plot(vec(exp(Els1)),vec(exp(Els01)),'.') -figure,plot(vec(exp(Els2)),vec(exp(Els02)),'.') \ No newline at end of file +figure,plot(VBA_vec(exp(Els1)),VBA_vec(exp(Els01)),'.') +figure,plot(VBA_vec(exp(Els2)),VBA_vec(exp(Els02)),'.') \ No newline at end of file diff --git a/subfunctions/demo_gaussian.m b/demos/7_mathematics/demo_gaussian.m similarity index 100% rename from subfunctions/demo_gaussian.m rename to demos/7_mathematics/demo_gaussian.m diff --git a/subfunctions/demo_imageRegistration.m b/demos/7_mathematics/demo_imageRegistration.m similarity index 100% rename from subfunctions/demo_imageRegistration.m rename to demos/7_mathematics/demo_imageRegistration.m diff --git a/subfunctions/demo_logNormal.m b/demos/7_mathematics/demo_logNormal.m similarity index 90% rename from subfunctions/demo_logNormal.m rename to demos/7_mathematics/demo_logNormal.m index 3e45f3c3..450c96ee 100644 --- a/subfunctions/demo_logNormal.m +++ b/demos/7_mathematics/demo_logNormal.m @@ -28,7 +28,7 @@ np = 64; lx = []; ly = [0,1e1]; -[pX,gX,pY,gY,X,Y] = get_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np,lx,ly); +[pX,gX,pY,gY,X,Y] = 
VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np,lx,ly); figure('color',[1 1 1]) diff --git a/subfunctions/demo_prodsig.m b/demos/7_mathematics/demo_prodsig.m similarity index 87% rename from subfunctions/demo_prodsig.m rename to demos/7_mathematics/demo_prodsig.m index dff62ace..0d83fae5 100644 --- a/subfunctions/demo_prodsig.m +++ b/demos/7_mathematics/demo_prodsig.m @@ -6,7 +6,7 @@ x = -50:5e-2:50; nx = length(x); -s1 = sigm(x); +s1 = VBA_sigmoid(x); a = -2:4; % slope b = 0;%-4:4; % inflexion point @@ -31,7 +31,7 @@ for i=1:na for j=1:nb - s2 = sigm(x,[],[a(i);b(j)]); + s2 = VBA_sigmoid(x, 'slope', exp(a(i)), 'center', b(j)); s12 = s1.*s2; cla(ha) plot(ha,x,s1,'b--') @@ -39,7 +39,7 @@ plot(ha,x,s2,'g--') plot(ha,x,s12,'r') drawnow - [posterior,out] = VBA_NLStateSpaceModel(vec(s12),vec(x),[],g_fname,dim,options); + [posterior,out] = VBA_NLStateSpaceModel(VBA_vec(s12),VBA_vec(x),[],g_fname,dim,options); P(i,j,1) = posterior.muPhi(1); P(i,j,2) = posterior.muPhi(2); % pause diff --git a/subfunctions/ObsRecGen.m b/demos/_models/ObsRecGen.m similarity index 92% rename from subfunctions/ObsRecGen.m rename to demos/_models/ObsRecGen.m index 474ffaca..8d110fa8 100644 --- a/subfunctions/ObsRecGen.m +++ b/demos/_models/ObsRecGen.m @@ -33,7 +33,7 @@ mx = x(1); % E[log-odds of P(o=1)] Vx = exp(x(2)); % V[log-odds of P(o=1)] - Po = sigmoid(mx/(sqrt(1+a*Vx))); % P(o=1) + Po = VBA_sigmoid(mx/(sqrt(1+a*Vx))); % P(o=1) else @@ -42,7 +42,7 @@ % there is a constraint of normalization, ie sum_k' P(k') = 1. Thus, % one only needs to keep track of k'-1 probabilities (the last one is, % by construction, 1-sum_k' P(k')). - Pk = sigmoid(x(1:(level-1))); % P(k'), with k'=0,...,k-1 + Pk = VBA_sigmoid(x(1:(level-1))); % P(k'), with k'=0,...,k-1 Pk = [Pk;max(0,1-sum(Pk))]; % insert last P(k'=k-1) % Get P(o=1|k'). 
Note: the agent's prediction P(o=1|k') depends upon @@ -64,7 +64,7 @@ Sig = exp(x(indlev(j).Par(2:2:2*ntotPar))); % V[theta|k'=j-1] Vx(j) = sum(Sig.*df.^2); % V[x(theta)|k'=j-1] end - Es = sigmoid(f./sqrt(1+a*Vx)); % E[sigm(x(theta))] + Es = VBA_sigmoid(f./sqrt(1+a*Vx)); % E[sigm(x(theta))] % Get P(o=1) = sum_k P(o=1|k')*P(k') Po = Pk'*Es; % k-ToM's belief about her opponent's next move @@ -74,8 +74,8 @@ % Make decision based upon P(o=1) DV = fplayer(Po,exp(P(1)),player,game); % incentive for a=1 if length(P)==1 - gx = sigmoid(DV); % P(a=1) + gx = VBA_sigmoid(DV); % P(a=1) else % P(2) = bias - gx = sigmoid(DV+P(2)); % P(a=1) with bias + gx = VBA_sigmoid(DV+P(2)); % P(a=1) with bias end diff --git a/subfunctions/U_dummy.m b/demos/_models/U_dummy.m similarity index 100% rename from subfunctions/U_dummy.m rename to demos/_models/U_dummy.m diff --git a/subfunctions/evolution0bisND.m b/demos/_models/evolution0bisND.m similarity index 90% rename from subfunctions/evolution0bisND.m rename to demos/_models/evolution0bisND.m index ec7b739b..d0ca3e07 100644 --- a/subfunctions/evolution0bisND.m +++ b/demos/_models/evolution0bisND.m @@ -29,10 +29,10 @@ % -- learning rule -- m0 = x(1); % current E[log-odds] V0 = exp(x(2)); % current V[log-odds] - p0 = sigmoid(m0); % current estimate of P(o=1) + p0 = VBA_sigmoid(m0); % current estimate of P(o=1) volatility = exp(theta(1)); V = 1./((1./(volatility+V0))+w*p0*(1-p0)); % updated V[log-odds] m = m0 + w*V*(u(1)-p0); % updated E[log-odds] (Laplace-Kalman update rule) % wrap-up - fx = [invsigmoid(sigmoid(m));log(V)]; % for numerical purposes + fx = [VBA_sigmoid(VBA_sigmoid(m),'inverse',true);log(V)]; % for numerical purposes end \ No newline at end of file diff --git a/subfunctions/f_2DneuralField.m b/demos/_models/f_2DneuralField.m similarity index 100% rename from subfunctions/f_2DneuralField.m rename to demos/_models/f_2DneuralField.m diff --git a/subfunctions/f_2d.m b/demos/_models/f_2d.m similarity index 100% rename from subfunctions/f_2d.m rename to demos/_models/f_2d.m diff --git a/subfunctions/f_2dwu.m b/demos/_models/f_2dwu.m similarity index 100% rename from subfunctions/f_2dwu.m rename to demos/_models/f_2dwu.m diff --git a/subfunctions/f_AR.m b/demos/_models/f_AR.m similarity index 100% rename from subfunctions/f_AR.m rename to demos/_models/f_AR.m diff --git a/subfunctions/f_ARn.m b/demos/_models/f_ARn.m similarity index 75% rename from subfunctions/f_ARn.m rename to demos/_models/f_ARn.m index 80047f46..71bf1564 100755 --- a/subfunctions/f_ARn.m +++ b/demos/_models/f_ARn.m @@ -2,11 +2,7 @@ % AR(1) evolution function with exponential decay n = size(x,1); -in.G0 = 1; -in.S0 = 0; -in.beta = 1; -in.INV = 0; -[alpha,dsda] = sigm(theta(1:n),in,[]); +[alpha,dsda] = VBA_sigmoid(theta(1:n)); xf = theta(n+1:2*n); fx = x + -alpha(:).*(x-xf); diff --git a/subfunctions/f_ARplus.m b/demos/_models/f_ARplus.m similarity index 100% rename from subfunctions/f_ARplus.m rename to demos/_models/f_ARplus.m diff --git a/subfunctions/f_AVL.m b/demos/_models/f_AVL.m similarity index 95% rename from subfunctions/f_AVL.m rename to demos/_models/f_AVL.m index 47d78347..b9610bcc 100644 --- a/subfunctions/f_AVL.m +++ b/demos/_models/f_AVL.m @@ -89,7 +89,7 @@ tdf=in.tdf; % initialize VB sufficient statistics pxi = zeros(1,n+1); -pxi(1) = checkGX_binomial(1./(1+exp(-prior(2)))); +pxi(1) = VBA_finiteBinomial (1./(1+exp(-prior(2)))); switch in.flag case {1,2} Ex = zeros(1,n+1); @@ -114,18 +114,18 @@ F(1) = freeEnergy(pxi(1),Ex(:,1),Vx(:,1),u,prior,Theta,in); for i = 1:n % update 
xi - [sx] = checkGX_binomial(1./(1+exp(-Ex(i)))); + [sx] = VBA_finiteBinomial (1./(1+exp(-Ex(i)))); lsx = log(sx); Elsx = lsx + 0.5.*Vx(i).^2.*(sx.^2-sx); p(1) = -0.5.*(iva.*delta1 + Elsx +l2pi - Theta(1)); p(2) = -0.5.*(iva.*delta2 + Elsx-Ex(i) +l2pi - Theta(1)); p = exp(p-max(p)); - pxi(i+1) = checkGX_binomial(p(1)./sum(p)); + pxi(i+1) = VBA_finiteBinomial (p(1)./sum(p)); switch in.flag case {1,2} % update x1 opt.args = {prior(2),prior(3),pxi(i+1)}; - [out1,out2] = GaussNewton('expBinom',Ex(i),opt); + [out1,out2] = VBA_GaussNewton('expBinom',Ex(i),opt); if ~isempty(out1) Ex(i+1) = out1; Vx(i+1) = out2; @@ -142,7 +142,7 @@ mft = 0;%max([0.5.*Vx(2,i).*ee2.*(ee2-prior(3))./tmp.^3,0]); P0 = 1./tmp + mft; opt.args = {prior(2),1./P0,pxi(i+1)}; - [out1,out2] = GaussNewton('expBinom',Ex(1,i),opt); + [out1,out2] = VBA_GaussNewton('expBinom',Ex(1,i),opt); if ~isempty(out1) Ex(1,i+1) = out1; Vx(1,i+1) = out2; @@ -161,7 +161,7 @@ o.conv = 0; init = Ex(2,i); while ~o.conv - [out1,out2,o] = GaussNewton('VarVolatility',init,opt); + [out1,out2,o] = VBA_GaussNewton('VarVolatility',init,opt); init = out1; end if ~isempty(out1) @@ -209,7 +209,7 @@ iva = exp(Theta(1)); Sqp = -pxi.*log(pxi) - (1-pxi).*log(1-pxi); l2pi = log(2*pi); -[sx] = checkGX_binomial(1./(1+exp(-Ex(1,:)))); +[sx] = VBA_finiteBinomial (1./(1+exp(-Ex(1,:)))); sx = sx(:)'; lsx = log(sx); if size(Ex,1) < 2 diff --git a/subfunctions/f_BSL.m b/demos/_models/f_BSL.m similarity index 92% rename from subfunctions/f_BSL.m rename to demos/_models/f_BSL.m index b13dc3f7..210f6fcb 100644 --- a/subfunctions/f_BSL.m +++ b/demos/_models/f_BSL.m @@ -22,7 +22,7 @@ % OUT: % - fx: updated sufficient statistics of log-odds of P(y=1) -if isweird(u) % e.g., 1st trial +if VBA_isWeird (u) % e.g., 1st trial fx = x; return end @@ -49,10 +49,10 @@ indSeq = 1; end V = V0 + volatility; % by default: inflated V[log-odds] -p0 = sigmoid(m0(indSeq)); % current estimate of P(y=1) +p0 = VBA_sigmoid(m0(indSeq)); % current estimate of P(y=1) V(indSeq) = 1./((1./V(indSeq))+ w*p0*(1-p0)); % updated V[log-odds] m(indSeq) = m0(indSeq) + w*V(indSeq)*(y-p0); % updated E[log-odds] (Laplace-Kalman update rule) -fx = [invsigmoid(sigmoid(m));log(V)]; % wrap-up +fx = [VBA_sigmoid(VBA_sigmoid(m),'inverse',true);log(V)]; % wrap-up \ No newline at end of file diff --git a/subfunctions/f_BSLinGame.m b/demos/_models/f_BSLinGame.m similarity index 97% rename from subfunctions/f_BSLinGame.m rename to demos/_models/f_BSLinGame.m index 7bb0b52b..c88b534e 100644 --- a/subfunctions/f_BSLinGame.m +++ b/demos/_models/f_BSLinGame.m @@ -25,7 +25,7 @@ % OUT: % - fx: updated sufficient statistics of log-odds of P(o=1) -if isweird(u) % e.g., 1st trial +if VBA_isWeird (u) % e.g., 1st trial fx = x; return end diff --git a/subfunctions/f_CaBBI_FHN.m b/demos/_models/f_CaBBI_FHN.m similarity index 100% rename from subfunctions/f_CaBBI_FHN.m rename to demos/_models/f_CaBBI_FHN.m diff --git a/subfunctions/f_CaBBI_QGIF.m b/demos/_models/f_CaBBI_QGIF.m similarity index 100% rename from subfunctions/f_CaBBI_QGIF.m rename to demos/_models/f_CaBBI_QGIF.m diff --git a/subfunctions/f_DCMwHRF.m b/demos/_models/f_DCMwHRF.m similarity index 100% rename from subfunctions/f_DCMwHRF.m rename to demos/_models/f_DCMwHRF.m diff --git a/subfunctions/f_DCMwHRFext.m b/demos/_models/f_DCMwHRFext.m similarity index 100% rename from subfunctions/f_DCMwHRFext.m rename to demos/_models/f_DCMwHRFext.m diff --git a/subfunctions/f_FitzHughNagumo.m b/demos/_models/f_FitzHughNagumo.m similarity index 100% rename from 
subfunctions/f_FitzHughNagumo.m rename to demos/_models/f_FitzHughNagumo.m diff --git a/subfunctions/f_FitzHughNagumo_calcium.m b/demos/_models/f_FitzHughNagumo_calcium.m similarity index 100% rename from subfunctions/f_FitzHughNagumo_calcium.m rename to demos/_models/f_FitzHughNagumo_calcium.m diff --git a/subfunctions/f_HGFinGame.m b/demos/_models/f_HGFinGame.m similarity index 95% rename from subfunctions/f_HGFinGame.m rename to demos/_models/f_HGFinGame.m index d0641c12..071c03ad 100644 --- a/subfunctions/f_HGFinGame.m +++ b/demos/_models/f_HGFinGame.m @@ -19,7 +19,7 @@ % - fx: the updated posterior sufficient statistics (having observed o). -if isweird(u) % e.g., 1st trial +if VBA_isWeird (u) % e.g., 1st trial fx = x; return end diff --git a/subfunctions/f_HH.m b/demos/_models/f_HH.m similarity index 83% rename from subfunctions/f_HH.m rename to demos/_models/f_HH.m index 60e92fa5..39e752fa 100644 --- a/subfunctions/f_HH.m +++ b/demos/_models/f_HH.m @@ -1,4 +1,4 @@ -function [fx] = f_HH(Xt,Theta,ut,inF) +function [fx] = f_HH(Xt, Theta, ut, inF) % Hodgkin-Hoxley membrane potential evolution function % function [fx] = f_HH(Xt,Theta,ut,inF) % IN: @@ -24,9 +24,9 @@ EL = 10.6; V = Xt(1); -m = mySig(Xt(2)); -n = mySig(Xt(3)); -h = mySig(Xt(4)); +m = VBA_sigmoid(Xt(2)); +n = VBA_sigmoid(Xt(3)); +h = VBA_sigmoid(Xt(4)); an = (0.1 - 0.01*V)./(exp(1-0.1*V)-1); am = (2.5 - 0.1*V)./(exp(2.5-0.1*V)-1); @@ -37,13 +37,11 @@ r = 1e-2; % for numerical stability -xdot = [ (-gNa*m.^3*h*(V-ENa) - gK*n.^4*(V-EK) - gL*(V-EL) + 1e0*ut)/C +xdot = [ (-gNa*m.^3*h*(V-ENa) - gK*n.^4*(V-EK) - gL*(V-EL) + ut)/C (am*(1-m) - bm*m)/(m-m.^2+r) (an*(1-n) - bn*n)/(n-n.^2+r) (ah*(1-h) - bh*h)/(h-h.^2+r) ]; fx = Xt + deltat.*xdot; -function sx = mySig(x) -sx = 1./(1+exp(-x)); - +end diff --git a/subfunctions/f_HRF.m b/demos/_models/f_HRF.m similarity index 91% rename from subfunctions/f_HRF.m rename to demos/_models/f_HRF.m index 4525234c..8fedaf43 100644 --- a/subfunctions/f_HRF.m +++ b/demos/_models/f_HRF.m @@ -9,8 +9,8 @@ % Get parameters [E0,V0,tau0,kaf,kas,epsilon,alpha] = BOLD_parameters; if ~isempty(P) - iE0 = sigm(E0,struct('beta',1,'G0',1,'INV',1)); - E0 = sigm(P(1)+iE0,struct('beta',1,'G0',1,'INV',0)); + iE0 = VBA_sigmoid(E0,'inverse',true); + E0 = VBA_sigmoid(P(1)+iE0); tau0 = tau0.*exp(P(2)); kaf = kaf.*exp(P(3)); kas = kas.*exp(P(4)); diff --git a/subfunctions/f_HRF2.m b/demos/_models/f_HRF2.m similarity index 100% rename from subfunctions/f_HRF2.m rename to demos/_models/f_HRF2.m diff --git a/subfunctions/f_HRF3.m b/demos/_models/f_HRF3.m similarity index 100% rename from subfunctions/f_HRF3.m rename to demos/_models/f_HRF3.m diff --git a/subfunctions/f_Hampton.m b/demos/_models/f_Hampton.m similarity index 90% rename from subfunctions/f_Hampton.m rename to demos/_models/f_Hampton.m index c2123c2f..ff83df89 100644 --- a/subfunctions/f_Hampton.m +++ b/demos/_models/f_Hampton.m @@ -26,16 +26,16 @@ % OUT: % - fx: updated hidden states -if isweird(u) % e.g., 1st trial +if VBA_isWeird (u) % e.g., 1st trial fx = x; return; end o = u(1); % opponent's last choice (o) a = u(2); % agent's last choice (a) -p0 = sigmoid(x(1)); % previous estimate of P(o=1) -eta = sig(P(1)); % weight of PE1 -lambda = sig(P(2)); % weight of PE2 +p0 = VBA_sigmoid(x(1)); % previous estimate of P(o=1) +eta = VBA_sigmoid(P(1)); % weight of PE1 +lambda = VBA_sigmoid(P(2)); % weight of PE2 beta = exp(P(3)); % opponent's temperature % derive first-order prediction error @@ -59,5 +59,5 @@ % "influence" learning rule p = p0 + eta*PE1 + 
lambda*k1*p0*(1-p0)*PE2; % P(o=1) p = max([min([p,1]),0]); % bound p between 0 and 1 -fx = invsigmoid(p); % for numerical reasons +fx = VBA_sigmoid(p,'inverse',true); % for numerical reasons diff --git a/subfunctions/f_Henon.m b/demos/_models/f_Henon.m similarity index 100% rename from subfunctions/f_Henon.m rename to demos/_models/f_Henon.m diff --git a/f_Id.m b/demos/_models/f_Id.m similarity index 100% rename from f_Id.m rename to demos/_models/f_Id.m diff --git a/subfunctions/f_L1.m b/demos/_models/f_L1.m similarity index 100% rename from subfunctions/f_L1.m rename to demos/_models/f_L1.m diff --git a/subfunctions/f_LV2D.m b/demos/_models/f_LV2D.m similarity index 100% rename from subfunctions/f_LV2D.m rename to demos/_models/f_LV2D.m diff --git a/subfunctions/f_Lorenz.m b/demos/_models/f_Lorenz.m similarity index 100% rename from subfunctions/f_Lorenz.m rename to demos/_models/f_Lorenz.m diff --git a/subfunctions/f_LotkaVolterra.m b/demos/_models/f_LotkaVolterra.m similarity index 100% rename from subfunctions/f_LotkaVolterra.m rename to demos/_models/f_LotkaVolterra.m diff --git a/subfunctions/f_OpLearn.m b/demos/_models/f_OpLearn.m similarity index 100% rename from subfunctions/f_OpLearn.m rename to demos/_models/f_OpLearn.m diff --git a/subfunctions/f_PSP.m b/demos/_models/f_PSP.m similarity index 100% rename from subfunctions/f_PSP.m rename to demos/_models/f_PSP.m diff --git a/subfunctions/f_Qlearn.m b/demos/_models/f_Qlearn.m similarity index 96% rename from subfunctions/f_Qlearn.m rename to demos/_models/f_Qlearn.m index c8b22bbe..7b8df858 100644 --- a/subfunctions/f_Qlearn.m +++ b/demos/_models/f_Qlearn.m @@ -16,7 +16,7 @@ % - fx: updated action values % - dfdx/dfdP: gradients for VBA inversion -alpha = sig(P(1)); % learning rate +alpha = VBA_sigmoid(P(1)); % learning rate a = 2-u(1); % index of agent's last chosen action r = u(2); % feedback fx = x; % identity mapping diff --git a/subfunctions/f_Qlearn2.m b/demos/_models/f_Qlearn2.m similarity index 100% rename from subfunctions/f_Qlearn2.m rename to demos/_models/f_Qlearn2.m diff --git a/subfunctions/f_Qlearn_dynLR.m b/demos/_models/f_Qlearn_dynLR.m similarity index 100% rename from subfunctions/f_Qlearn_dynLR.m rename to demos/_models/f_Qlearn_dynLR.m diff --git a/subfunctions/f_Qlearn_gammaLR.m b/demos/_models/f_Qlearn_gammaLR.m similarity index 100% rename from subfunctions/f_Qlearn_gammaLR.m rename to demos/_models/f_Qlearn_gammaLR.m diff --git a/models/f_Qlearning.m b/demos/_models/f_Qlearning.m similarity index 98% rename from models/f_Qlearning.m rename to demos/_models/f_Qlearning.m index f79d3a27..87fedded 100644 --- a/models/f_Qlearning.m +++ b/demos/_models/f_Qlearning.m @@ -31,7 +31,7 @@ % acceptable range for each parameter. % learning rate -alpha = sig(P); % sig: [-Inf,Inf] -> [0 1] +alpha = VBA_sigmoid(P); % [-Inf,Inf] -> [0 1] % Apply delta-rule to update action values diff --git a/demos/_models/f_QlearningAsym.m b/demos/_models/f_QlearningAsym.m new file mode 100644 index 00000000..7945e3ed --- /dev/null +++ b/demos/_models/f_QlearningAsym.m @@ -0,0 +1,76 @@ +function [fx, dfdx, dfdp] = f_QlearningAsym (x, P, u, in) +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [fx, dfdx, dfdp] = f_QlearningAsym (x, P, u, in) +% Reinforcement-learning evolution function for a n-armed bandit task with +% asymmetric learning for positive and negative feedbakcs +% +% An RL agent learns by trial and error. 
A bandit task is such that, after +% each action, the agent receives a feedback (reward if positive, +% punishment if negative). The RL agent updates its action values as +% follows: +% V(chosen action) = V(chosen action) + alpha*(feedback-V(chosen action)) +% V(unchosen action) = V(unchosen action) +% IN: +% - x: action values (n x 1) +% - P: learning rate (will be sigmoid transformed) +% - u: (1) previous action +% (2) feedback received for previous action +% (3:4) unused +% - in: [useless] +% OUT: +% - fx: updated action values +% - dfdx/dfdP: gradients for VBA inversion + +% ///////////////////////////////////////////////////////////////////////// + +n = numel(x); + +% In the case there was no feedback, do nothing +% ========================================================================= + +if isnan(u(2)) + fx = x; + dfdx = eye(n); + dfdp = zeros(numel(P),n); + return; +end + +% Apply delta-rule to update action values +% ========================================================================= + +% get experimental conditions +cueIdx = u(3 : 4); +prevActionIdx = cueIdx(u(1) + 1); % action 0 is first index +feedback = u(2); + +% start with previous values +fx = x; + +% prediction error +delta = feedback - x(prevActionIdx); + +% asymmetric learning rate +alpha = VBA_sigmoid(P(1) + sign (delta) * P(2)); % [-Inf,Inf] -> [0 1] + +% update Q-value +fx(prevActionIdx) = x(prevActionIdx) + alpha*delta; % update chosen value + + +% Compute evolution function's gradient +% ========================================================================= +% This is not necessary, as the toolbox will approximate those gradients if +% needed. However, providing the analytical gradient can higly speed-up the +% inversion. + + +% derivative w.r.t hidden state +% ------------------------------------------------------------------------- +dfdx = eye(n); +dfdx(prevActionIdx,prevActionIdx) = 1 - alpha; + +% derivative w.r.t parameters +% ------------------------------------------------------------------------- +dfdp = zeros(2,n); +dfdp(1,prevActionIdx) = alpha * (1 - alpha) * delta; +dfdp(2,prevActionIdx) = alpha * (1 - alpha) * sign(delta) * delta; diff --git a/subfunctions/f_RLinGame.m b/demos/_models/f_RLinGame.m similarity index 97% rename from subfunctions/f_RLinGame.m rename to demos/_models/f_RLinGame.m index 7c8d902c..3e67ccd0 100644 --- a/subfunctions/f_RLinGame.m +++ b/demos/_models/f_RLinGame.m @@ -21,7 +21,7 @@ % - fx: updated action values % - dfdx/dfdP: gradients for VBA inversion -if isweird(u) % e.g., 1st trial +if VBA_isWeird (u) % e.g., 1st trial fx = x; return end diff --git a/demos/_models/f_Rossler.m b/demos/_models/f_Rossler.m new file mode 100644 index 00000000..3b31c8b4 --- /dev/null +++ b/demos/_models/f_Rossler.m @@ -0,0 +1,17 @@ +function [fx, dfdx, dfdp] = f_Rossler(x,P,u,in) + +% Rossler oscillator evolution function + +a = P(1); +b = P(2); +c = P(3); + +xdot = - x(2) - x(3); +ydot = x(1) + a.*x(2); +zdot = b + x(3).*(x(1)-c); + +fx = x + in.deltat.*[xdot;ydot;zdot]; + +dfdx = eye(3) + in.deltat * [0, 1, x(3); - 1, a, 0; -1, 0, (x(1)- c)]; + +dfdp = in.deltat * [0, x(2), 0; 0, 0, 1; 0, 0, - x(3)] ; \ No newline at end of file diff --git a/demos/_models/f_SHC.m b/demos/_models/f_SHC.m new file mode 100644 index 00000000..123e3ffe --- /dev/null +++ b/demos/_models/f_SHC.m @@ -0,0 +1,76 @@ +function [fx, dF_dX] = f_SHC (Xt, ~, ~, inF) +% stable heteroclinic channels evolution function + +deltat = inF.deltat; +x = Xt; + +if ~ isfield (inF, 'K') + inF.K = [1/2; 1/32]; +end +if ~ isfield 
(inF, 'lambda') + inF.lambda = 0.3; +end + +if ~ isfield (inF, 'G0') + inF.G0 = 50; +end +if ~ isfield (inF, 'beta') + inF.beta = 0.5; +end + +if ~ isfield (inF, 'ind1') || ~ isfield (inF, 'R1') + inF.ind1 = 1 : 4; + inF.R1{1} = ... + [1 5 5 0.5 + 0.5 1 5 5 + 5 0.5 1 5 + 5 5 0.5 1]; + inF.R1{2} = ... + [1 5 5 0.5 + 5 1 0.5 5 + 0.5 5 1 5 + 5 0.5 5 1]; + inF.R1{3} = ... + [1 0.5 5 5 + 5 1 0.5 5 + 5 5 1 0.5 + 0.5 5 1 5]; +end +if ~ isfield (inF, 'ind2') || ~ isfield (inF, 'R2') + inF.ind2 = 5 : 7; + inF.R2 = ... + [1 5 0.5 + 0.5 1 5 + 5 0.5 1 ]; +end + + +% Separate states +x1 = x(inF.ind1); +x2 = x(inF.ind2); + +% High level +[SX2, dsdx2] = VBA_sigmoid (x2, 'scale', inF.G0, 'slope', inF.beta); +ff{2} = inF.K(2) * (- inF.lambda * x2 - inF.R2 * SX2); + +% Low level +[SX1, dsdx1] = VBA_sigmoid (x1, 'scale', inF.G0, 'slope', inF.beta); +R1eff = inF.R1{1} * SX2(1) + inF.R1{2} * SX2(2) + inF.R1{3} * SX2(3); +ff{1} = inF.K(1) * (- inF.lambda * x1 - R1eff * SX1); + +f = [ff{1}; ff{2}]; + +df1 = zeros (size (f, 1), length (inF.ind1)); +df2 = zeros (size (f, 1), length (inF.ind2)); + +df2(inF.ind2, :) = inF.K(2) * inF.lambda * (- eye (length (inF.ind2))) - inF.K(2) * inF.R2 * diag (dsdx2); +df1(inF.ind1, :) = inF.K(1) * inF.lambda * (- eye (length (inF.ind1))) - inF.K(1) * R1eff * diag (dsdx1); + +df_dx = [df1, df2]'; +df_dx(inF.ind2(1), inF.ind1) = (- inF.K(1) * dsdx2(1) * inF.R1{1} * SX1)'; +df_dx(inF.ind2(2), inF.ind1) = (- inF.K(1) * dsdx2(2) * inF.R1{2} * SX1)'; +df_dx(inF.ind2(3), inF.ind1) = (- inF.K(1) * dsdx2(3) * inF.R1{3} * SX1)'; + +fx = Xt + deltat * f; +dF_dX = eye (length (Xt)) + deltat * df_dx; + diff --git a/subfunctions/f_VBfree.m b/demos/_models/f_VBfree.m similarity index 100% rename from subfunctions/f_VBfree.m rename to demos/_models/f_VBfree.m diff --git a/subfunctions/f_VBvolatile0.m b/demos/_models/f_VBvolatile0.m similarity index 85% rename from subfunctions/f_VBvolatile0.m rename to demos/_models/f_VBvolatile0.m index c6ba8be5..a71ec80c 100644 --- a/subfunctions/f_VBvolatile0.m +++ b/demos/_models/f_VBvolatile0.m @@ -21,9 +21,9 @@ % transform and define states and parameters x(3) = exp(x(3)); % variance on second-level states is in log-space x(5) = exp(x(5)); % variance on third-level states is in log-space -ka = in.lev2*sgm(P(1),in.kaub); +ka = in.lev2 * VBA_sigmoid(P(1), 'scale', in.kaub); om = P(2); % volatility rescaling -th = sgm(P(3),in.thub); +th = VBA_sigmoid(P(3), 'scale', in.thub); vol = exp(ka*x(4)+om); fx = zeros(size(x)); @@ -31,12 +31,12 @@ fx(1) = u(1); % trivial first-level states % 2nd level -s1h = sgm(x(2),1)*(1-sgm(x(2),1)); % likelihood precision -pe1 = fx(1) - sgm(x(2),1); % prediction error +s1h = VBA_sigmoid(x(2))*(1-VBA_sigmoid(x(2))); % likelihood precision +pe1 = fx(1) - VBA_sigmoid(x(2)); % prediction error s2h = x(3) + vol; % 2nd-level prediction variance fx(3) = 1/(s2h^-1 + s1h); % posterior variance fx(2) = x(2) + fx(3)*pe1; % 2nd-level update -fx(2) = invsigmoid(sigmoid(fx(2))); % for numerical purposes +fx(2) = VBA_sigmoid(VBA_sigmoid(fx(2)),'inverse',true); % for numerical purposes % 3rd level pi3h = 1/(x(5)+th); diff --git a/demos/_models/f_alpha.m b/demos/_models/f_alpha.m new file mode 100755 index 00000000..107565c6 --- /dev/null +++ b/demos/_models/f_alpha.m @@ -0,0 +1,8 @@ +function [fx] = f_alpha(x,P,u,in) +H = P(1); +T = exp(P(2)); + +dx = [x(2); ... 
+ (H / T) * u - (2 / T) * x(2) - ( 1 / T ^ 2) * x(1)]; + +fx = x + in.dt * dx; \ No newline at end of file diff --git a/subfunctions/f_dbw.m b/demos/_models/f_dbw.m similarity index 100% rename from subfunctions/f_dbw.m rename to demos/_models/f_dbw.m diff --git a/subfunctions/f_dcm4fmri.m b/demos/_models/f_dcm4fmri.m similarity index 100% rename from subfunctions/f_dcm4fmri.m rename to demos/_models/f_dcm4fmri.m diff --git a/subfunctions/f_dcm4fmri0.m b/demos/_models/f_dcm4fmri0.m similarity index 100% rename from subfunctions/f_dcm4fmri0.m rename to demos/_models/f_dcm4fmri0.m diff --git a/subfunctions/f_dcm_extension.m b/demos/_models/f_dcm_extension.m similarity index 100% rename from subfunctions/f_dcm_extension.m rename to demos/_models/f_dcm_extension.m diff --git a/subfunctions/f_dcm_withU.m b/demos/_models/f_dcm_withU.m similarity index 100% rename from subfunctions/f_dcm_withU.m rename to demos/_models/f_dcm_withU.m diff --git a/subfunctions/f_doubleWell.m b/demos/_models/f_doubleWell.m similarity index 100% rename from subfunctions/f_doubleWell.m rename to demos/_models/f_doubleWell.m diff --git a/f_embed.m b/demos/_models/f_embed.m similarity index 100% rename from f_embed.m rename to demos/_models/f_embed.m diff --git a/subfunctions/f_embed0.m b/demos/_models/f_embed0.m similarity index 100% rename from subfunctions/f_embed0.m rename to demos/_models/f_embed0.m diff --git a/subfunctions/f_embedAR.m b/demos/_models/f_embedAR.m similarity index 100% rename from subfunctions/f_embedAR.m rename to demos/_models/f_embedAR.m diff --git a/subfunctions/f_fullDCM4fmri.m b/demos/_models/f_fullDCM4fmri.m similarity index 100% rename from subfunctions/f_fullDCM4fmri.m rename to demos/_models/f_fullDCM4fmri.m diff --git a/subfunctions/f_gen.m b/demos/_models/f_gen.m similarity index 100% rename from subfunctions/f_gen.m rename to demos/_models/f_gen.m diff --git a/subfunctions/f_kToM.m b/demos/_models/f_kToM.m similarity index 97% rename from subfunctions/f_kToM.m rename to demos/_models/f_kToM.m index 05f8de58..e6d628f8 100644 --- a/subfunctions/f_kToM.m +++ b/demos/_models/f_kToM.m @@ -23,7 +23,7 @@ % - fx: updated hidden states % [see RecToMfunction.m] -if isweird(u) % e.g., 1st trial +if VBA_isWeird (u) % e.g., 1st trial fx = x; return end diff --git a/subfunctions/f_lin1D.m b/demos/_models/f_lin1D.m similarity index 100% rename from subfunctions/f_lin1D.m rename to demos/_models/f_lin1D.m diff --git a/demos/_models/f_lin2D.m b/demos/_models/f_lin2D.m new file mode 100644 index 00000000..3854e517 --- /dev/null +++ b/demos/_models/f_lin2D.m @@ -0,0 +1,21 @@ +function [fx,dF_dX,dF_dTheta] = f_lin2D(Xt,Theta,ut,inF) +% dummy 2D linear evolution function + +deltat = inF.deltat; + +if ~ isfield (inF, 'a') + inF.a = 1; +end +if ~ isfield (inF, 'b') + inF.b = 1e-1; +end + +inF.a = inF.a * exp (Theta(1)); + +A = [- inF.b, - inF.a; + 1, - inF.b]; + +fx = Xt + deltat * (A * Xt + ut); +dF_dX = eye (size (Xt, 1)) + deltat * A'; + +dF_dTheta = deltat * [- inF.a * Xt(2), 0]; \ No newline at end of file diff --git a/subfunctions/f_metaToM.m b/demos/_models/f_metaToM.m similarity index 88% rename from subfunctions/f_metaToM.m rename to demos/_models/f_metaToM.m index 62609b51..35af7a8c 100644 --- a/subfunctions/f_metaToM.m +++ b/demos/_models/f_metaToM.m @@ -27,10 +27,10 @@ fx = NaN(size(x)); % initialize updated states % 1- update P(agent=kToM) -Pi0 = sigmoid(x(inF.meta.indx)); % prior P(agent=1) +Pi0 = VBA_sigmoid(x(inF.meta.indx)); % prior P(agent=1) % partial forgetting of prior belief on 
opponent's type? if inF.meta.diluteP - dc = sigmoid(P(inF.meta.indP)); % dilution coefficient + dc = VBA_sigmoid(P(inF.meta.indP)); % dilution coefficient Pi0 = (1-dc).*Pi0 + dc./2; end @@ -43,12 +43,12 @@ if level==0 % 0-ToM [should be useless] mx = xktom(1); % E[log-odds of P(o=1)] Vx = exp(xktom(2)); % V[log-odds of P(o=1)] - Els1 = Elogsig(mx,Vx); - Els0 = Elogsig(-mx,Vx); + Els1 = VBA_Elogsig(mx,Vx); + Els0 = VBA_Elogsig(-mx,Vx); ELL = ot.*Els1 + (1-ot).*Els0; h_kToM = exp(ELL); % P(o|k-ToM) else - Pk = sigmoid(x(1:(level-1))); % P(k'), with k'=0,...,k-1 + Pk = VBA_sigmoid(x(1:(level-1))); % P(k'), with k'=0,...,k-1 Pk = [Pk;max(0,1-sum(Pk))]; % insert last P(k'=k-1) f = zeros(level,1); % E[x(theta)] Vx = zeros(level,1); % V[x(theta)] @@ -58,8 +58,8 @@ Sig = exp(xktom(indlev(j).Par(2:2:2*ntotPar))); % V[theta|k'=j-1] Vx(j) = sum(Sig.*df.^2); % V[x(theta)|k'=j-1] end - Els1 = Elogsig(f,Vx); - Els0 = Elogsig(-f,Vx); + Els1 = VBA_Elogsig(f,Vx); + Els0 = VBA_Elogsig(-f,Vx); ELL = ot.*Els1 + (1-ot).*Els0; h_kToM = exp(Pk'*ELL); % P(o|k-ToM) end @@ -71,7 +71,7 @@ useq(2) = []; K = inFseq.K; % sequence depth yb = useq(2:K+1); % previous outcomes -if isweird(yb) +if VBA_isWeird (yb) h_seq = 1/2; else if K >0 @@ -81,15 +81,15 @@ end m = xseq(indSeq); v = exp(xseq((2^K)+indSeq)); - Els1 = Elogsig(m,v); - Els0 = Elogsig(-m,v); + Els1 = VBA_Elogsig(m,v); + Els0 = VBA_Elogsig(-m,v); ELL = ot.*Els1 + (1-ot).*Els0; h_seq = exp(ELL); % P(o|seq) end % VB update of P(agent=kToM) Pi = Pi0.*h_kToM./(Pi0.*h_kToM+(1-Pi0).*h_seq); -fx(inF.meta.indx) = invsigmoid(Pi); +fx(inF.meta.indx) = VBA_sigmoid(Pi, 'inverse', true); % 2- update k-ToM belief diff --git a/subfunctions/f_replicator.m b/demos/_models/f_replicator.m similarity index 100% rename from subfunctions/f_replicator.m rename to demos/_models/f_replicator.m diff --git a/demos/_models/f_rwl.m b/demos/_models/f_rwl.m new file mode 100644 index 00000000..faf91e32 --- /dev/null +++ b/demos/_models/f_rwl.m @@ -0,0 +1,7 @@ +function fx = f_rwl(x,P,u,in) +fx = x; +for i = 1 : size (u, 1) + if ~ VBA_isWeird (u(i)) + fx = fx + P(i) * (u(i) - x); + end +end \ No newline at end of file diff --git a/subfunctions/f_rwl2.m b/demos/_models/f_rwl2.m similarity index 84% rename from subfunctions/f_rwl2.m rename to demos/_models/f_rwl2.m index 35b4825e..4712c778 100644 --- a/subfunctions/f_rwl2.m +++ b/demos/_models/f_rwl2.m @@ -19,16 +19,16 @@ switch in.model case 'utility' % weigths on feedbacks r = P(in.indR(u(in.indu)+2))*u(in.indu); - alpha = sigm(P(in.indAlpha)); + alpha = VBA_sigmoid(P(in.indAlpha)); case 'learning' % different learning rates r = P(in.indR)*u(in.indu); - alpha = sigm(P(in.indAlpha(u(in.indu)+2))); + alpha = VBA_sigmoid(P(in.indAlpha(u(in.indu)+2))); case 'both' r = P(in.indR(u(in.indu)+2))*u(in.indu); - alpha = sigm(P(in.indAlpha(u(in.indu)+2))); + alpha = VBA_sigmoid(P(in.indAlpha(u(in.indu)+2))); case 'none' r = P(in.indR)*u(in.indu); - alpha = sigm(P(in.indAlpha)); + alpha = VBA_sigmoid(P(in.indAlpha)); otherwise error end diff --git a/subfunctions/f_vanDerPol.m b/demos/_models/f_vanDerPol.m similarity index 100% rename from subfunctions/f_vanDerPol.m rename to demos/_models/f_vanDerPol.m diff --git a/demos/_models/f_vgo.m b/demos/_models/f_vgo.m new file mode 100644 index 00000000..ebbf1d4d --- /dev/null +++ b/demos/_models/f_vgo.m @@ -0,0 +1,2 @@ +function [fx] = f_vgo(x,P,u,in) +fx = x + VBA_sigmoid(P(1))*(P(2)*u-x); \ No newline at end of file diff --git a/subfunctions/f_wsls.m b/demos/_models/f_wsls.m similarity index 100% rename from 
subfunctions/f_wsls.m rename to demos/_models/f_wsls.m diff --git a/subfunctions/f_wslsinGame.m b/demos/_models/f_wslsinGame.m similarity index 97% rename from subfunctions/f_wslsinGame.m rename to demos/_models/f_wslsinGame.m index 038a03b4..909a752d 100644 --- a/subfunctions/f_wslsinGame.m +++ b/demos/_models/f_wslsinGame.m @@ -19,7 +19,7 @@ % OUT: % - fx: evolved pseudo q-values (2x1) -if isweird(u) % e.g., 1st trial +if VBA_isWeird (u) % e.g., 1st trial fx = x; return end diff --git a/subfunctions/g_2AFC_basis.m b/demos/_models/g_2AFC_basis.m similarity index 95% rename from subfunctions/g_2AFC_basis.m rename to demos/_models/g_2AFC_basis.m index d57a8d75..ded88d40 100755 --- a/subfunctions/g_2AFC_basis.m +++ b/demos/_models/g_2AFC_basis.m @@ -30,11 +30,11 @@ for i=1:2 [tmp,ix(i)] = min((in.gx-x(i)).^2); [tmp,iy(i)] = min((in.gy-y(i)).^2); - V(:,i) = vec(in.bf(ix(i),iy(i),:)); + V(:,i) = VBA_vec(in.bf(ix(i),iy(i),:)); end dV = V(:,1)-V(:,2); dv = dV'*P; -gx = sig(dv); +gx = VBA_sigmoid(dv); dsdx = gx.*(1-gx); dgdx = []; dgdp = dsdx*dV; diff --git a/subfunctions/g_AVL.m b/demos/_models/g_AVL.m similarity index 97% rename from subfunctions/g_AVL.m rename to demos/_models/g_AVL.m index b6bba66a..f8a3aff0 100644 --- a/subfunctions/g_AVL.m +++ b/demos/_models/g_AVL.m @@ -23,7 +23,6 @@ if le > 0 - % [le,dledy] = logExp(y,[ 1 1 0 ]); gx = 0.5.*exp(-Phi(1)).*log(le); dydx = [ 2*exp(Phi(1)-Phi(2)).*(2*u(in.uc)-1) diff --git a/subfunctions/g_BSL.m b/demos/_models/g_BSL.m similarity index 88% rename from subfunctions/g_BSL.m rename to demos/_models/g_BSL.m index 387cc59b..b4dc4564 100644 --- a/subfunctions/g_BSL.m +++ b/demos/_models/g_BSL.m @@ -13,7 +13,7 @@ % OUT: % - gx: P(y_t=1|y_{t-1}) -if isweird(u) % e.g., 1st trial +if VBA_isWeird (u) % e.g., 1st trial gx = 0.5; return end @@ -29,6 +29,6 @@ end m = x(indSeq); v = exp(x((2^K)+indSeq)); -gx = sig(phi(2)+exp(phi(1)).*m./sqrt(1+a*v)); % E[sigm(log-odds of P(y))] +gx = VBA_sigmoid(phi(2)+exp(phi(1)).*m./sqrt(1+a*v)); % E[sigm(log-odds of P(y))] diff --git a/subfunctions/g_BSLinGame.m b/demos/_models/g_BSLinGame.m similarity index 96% rename from subfunctions/g_BSLinGame.m rename to demos/_models/g_BSLinGame.m index 12e93488..8a4336c6 100644 --- a/subfunctions/g_BSLinGame.m +++ b/demos/_models/g_BSLinGame.m @@ -28,5 +28,5 @@ % Make decision based upon the likely opponent's next move DV = fplayer(Po,exp(P(1)),in.player,in.game); % incentive for a=1 -gx = sigmoid(DV+P(2)); % P(a=1) with bias +gx = VBA_sigmoid(DV+P(2)); % P(a=1) with bias diff --git a/subfunctions/g_CaBBI.m b/demos/_models/g_CaBBI.m similarity index 100% rename from subfunctions/g_CaBBI.m rename to demos/_models/g_CaBBI.m diff --git a/subfunctions/g_DCMwHRFext.m b/demos/_models/g_DCMwHRFext.m similarity index 100% rename from subfunctions/g_DCMwHRFext.m rename to demos/_models/g_DCMwHRFext.m diff --git a/subfunctions/g_DG2.m b/demos/_models/g_DG2.m similarity index 100% rename from subfunctions/g_DG2.m rename to demos/_models/g_DG2.m diff --git a/subfunctions/g_DoubleGamma.m b/demos/_models/g_DoubleGamma.m similarity index 100% rename from subfunctions/g_DoubleGamma.m rename to demos/_models/g_DoubleGamma.m diff --git a/subfunctions/g_ERP.m b/demos/_models/g_ERP.m similarity index 100% rename from subfunctions/g_ERP.m rename to demos/_models/g_ERP.m diff --git a/subfunctions/g_ERP_reduced.m b/demos/_models/g_ERP_reduced.m similarity index 100% rename from subfunctions/g_ERP_reduced.m rename to demos/_models/g_ERP_reduced.m diff --git a/subfunctions/g_ExpUtil.m 
b/demos/_models/g_ExpUtil.m similarity index 71% rename from subfunctions/g_ExpUtil.m rename to demos/_models/g_ExpUtil.m index b26ff2f5..1278ff39 100644 --- a/subfunctions/g_ExpUtil.m +++ b/demos/_models/g_ExpUtil.m @@ -7,16 +7,10 @@ beta = exp(P); dQ = (x_t(1)-x_t(5)); -gx =sig( beta*dQ ); +gx = VBA_sigmoid( beta*dQ ); dgdx = zeros(8,1); dgdx(1) = beta*gx*(1-gx); dgdx(5) = -beta*gx*(1-gx); -dgdP = [beta*dQ*gx*(1-gx)]; - - -function y=sig(x) -y = 1/(1+exp(-x)); -y(y1-eps) = 1-eps; \ No newline at end of file +dgdP = [beta*dQ*gx*(1-gx)]; \ No newline at end of file diff --git a/subfunctions/g_Fourier.m b/demos/_models/g_Fourier.m similarity index 100% rename from subfunctions/g_Fourier.m rename to demos/_models/g_Fourier.m diff --git a/subfunctions/g_GLM.m b/demos/_models/g_GLM.m similarity index 100% rename from subfunctions/g_GLM.m rename to demos/_models/g_GLM.m diff --git a/subfunctions/g_GLM4decoding.m b/demos/_models/g_GLM4decoding.m similarity index 100% rename from subfunctions/g_GLM4decoding.m rename to demos/_models/g_GLM4decoding.m diff --git a/stats&plots/g_GLM_missingData.m b/demos/_models/g_GLM_missingData.m similarity index 100% rename from stats&plots/g_GLM_missingData.m rename to demos/_models/g_GLM_missingData.m diff --git a/demos/_models/g_GLMsparse.m b/demos/_models/g_GLMsparse.m new file mode 100644 index 00000000..e50cfbb3 --- /dev/null +++ b/demos/_models/g_GLMsparse.m @@ -0,0 +1,4 @@ +function [gx,dgdx,dgdP] = g_GLMsparse(x,P,u,in) +[sP, dsdP] = VBA_sparsifyPrior (P); +[gx,dgdx,dgdP] = g_GLM(x,sP,u,in); +dgdP = diag(dsdP)*dgdP; % for exploiting the analytical gradients from g_GLM diff --git a/demos/_models/g_GLMsparseAdapt.m b/demos/_models/g_GLMsparseAdapt.m new file mode 100644 index 00000000..3c5fb929 --- /dev/null +++ b/demos/_models/g_GLMsparseAdapt.m @@ -0,0 +1,10 @@ +function [gx,dgdx,dgdP] = g_GLMsparseAdapt(x,P,u,in) + + +[sP, dsdx, dsdp] = VBA_sparsifyPrior (P(1:end-1), P(end)); + +[gx, ~, dgdP] = g_GLM (x, sP, u, in); + +dgdx = []; + +dgdP = [ diag(dsdx) * dgdP; dsdp' * dgdP]; diff --git a/subfunctions/g_GammaDensity.m b/demos/_models/g_GammaDensity.m similarity index 100% rename from subfunctions/g_GammaDensity.m rename to demos/_models/g_GammaDensity.m diff --git a/subfunctions/g_Gaussian.m b/demos/_models/g_Gaussian.m similarity index 100% rename from subfunctions/g_Gaussian.m rename to demos/_models/g_Gaussian.m diff --git a/subfunctions/g_HGFinGame.m b/demos/_models/g_HGFinGame.m similarity index 93% rename from subfunctions/g_HGFinGame.m rename to demos/_models/g_HGFinGame.m index 5a6f7fa7..6515f57d 100644 --- a/subfunctions/g_HGFinGame.m +++ b/demos/_models/g_HGFinGame.m @@ -29,10 +29,10 @@ % Get the agent's prediction about her opponent's next move, ie P(o=1). 
mx = x(2); % E[log-odds of P(o=1)] Vx = exp(x(3)); % V[log-odds of P(o=1)] -Po = sigmoid(mx/(sqrt(1+a*Vx))); % P(o=1) +Po = VBA_sigmoid(mx/(sqrt(1+a*Vx))); % P(o=1) % Make decision based upon the likely opponent's next move DV = fplayer(Po,exp(P(1)),player,game); % incentive for a=1 -gx = sigmoid(DV+P(2)); % P(a=1) with bias +gx = VBA_sigmoid(DV+P(2)); % P(a=1) with bias diff --git a/subfunctions/g_HRF3.m b/demos/_models/g_HRF3.m similarity index 98% rename from subfunctions/g_HRF3.m rename to demos/_models/g_HRF3.m index 74040143..6ccdc43f 100644 --- a/subfunctions/g_HRF3.m +++ b/demos/_models/g_HRF3.m @@ -41,7 +41,7 @@ in.ind2 = [in.ind2:2:2*nreg]; end else - [E0,dsdp] = sigm(P(1)-0.6633,struct('beta',1,'G0',1,'INV',0)); + [E0,dsdp] = VBA_sigmoid(P(1)-0.6633); E0 = E0(:); try epsilon = exp(P(2)); diff --git a/subfunctions/g_HRF_distributed.m b/demos/_models/g_HRF_distributed.m similarity index 100% rename from subfunctions/g_HRF_distributed.m rename to demos/_models/g_HRF_distributed.m diff --git a/subfunctions/g_Hampton.m b/demos/_models/g_Hampton.m similarity index 91% rename from subfunctions/g_Hampton.m rename to demos/_models/g_Hampton.m index 7aec133b..93b9a630 100644 --- a/subfunctions/g_Hampton.m +++ b/demos/_models/g_Hampton.m @@ -20,6 +20,6 @@ game = in.game; % game's payoff table player = in.player; % agent's role -Po = sigmoid(x(1)); % P(o=1) +Po = VBA_sigmoid(x(1)); % P(o=1) DV = fplayer(Po,exp(P(1)),player,game); % incentive for a=1 -gx = sigmoid(DV+P(2)); % P(a=1) with bias \ No newline at end of file +gx = VBA_sigmoid(DV+P(2)); % P(a=1) with bias \ No newline at end of file diff --git a/demos/_models/g_Id.m b/demos/_models/g_Id.m new file mode 100644 index 00000000..24e3fb8b --- /dev/null +++ b/demos/_models/g_Id.m @@ -0,0 +1,28 @@ +function [gx,dG_dX,dG_dPhi] = g_Id(Xt,Phi,ut,inG) +% Identity observation mapping (partially observable) + +n = size (Xt, 1); + +if ~ isfield (inG, 'ind') + inG.ind = 1:n; +end +if ~ isfield (inG, 'scale') + inG.scale = 1; +end + +G = eye (n); +G = inG.scale * G(inG.ind, :); + +if isfield(inG, 'k') + G = kron (G, ones(inG.k, 1)); +end + + +gx = G * Xt; +dG_dX = G'; + +if size (Phi, 1) > 0 + dG_dPhi = zeros (size (Phi, 1), size(G, 1)); +else + dG_dPhi = []; +end diff --git a/subfunctions/g_Id_phi.m b/demos/_models/g_Id_phi.m similarity index 100% rename from subfunctions/g_Id_phi.m rename to demos/_models/g_Id_phi.m diff --git a/subfunctions/g_LinDecomp.m b/demos/_models/g_LinDecomp.m similarity index 85% rename from subfunctions/g_LinDecomp.m rename to demos/_models/g_LinDecomp.m index f8fe57a2..6f18abb8 100644 --- a/subfunctions/g_LinDecomp.m +++ b/demos/_models/g_LinDecomp.m @@ -5,4 +5,4 @@ X(:,i) = P(in.ind(i).X); Y(i,:) = P(in.ind(i).Y)'; end -gx = vec(X*Y) + P(in.ind0); \ No newline at end of file +gx = VBA_vec(X*Y) + P(in.ind0); \ No newline at end of file diff --git a/subfunctions/g_NI.m b/demos/_models/g_NI.m similarity index 100% rename from subfunctions/g_NI.m rename to demos/_models/g_NI.m diff --git a/demos/_models/g_QLearning.m b/demos/_models/g_QLearning.m new file mode 100644 index 00000000..cee1649d --- /dev/null +++ b/demos/_models/g_QLearning.m @@ -0,0 +1,74 @@ +function [gx, dgdx, dgdp] = g_QLearning (x, P, u, in) +% // VBA toolbox ////////////////////////////////////////////////////////// +% +% [gx, dgdx, dgdP] = g_QLearning (x, P, u, in) +% softmax decision rule for Q-learning (N-armed bandit task) +% +% IN: +% - x: Q-values +% - P: (1) inverse (log-) temperature +% (2) bias +% - u: (idx(1)) index of 0 coded cue +% 
(idx(2)) index of 1 coded cue +% - in: +% - idx: position of inputs indicating Q-values to use +% OUT: +% - gx : P(y = 1|x) + +% ///////////////////////////////////////////////////////////////////////// + +% Get parameter values +% ========================================================================= + +% inverse temperature +beta = exp (P(1)); % exp: [-Inf,Inf] -> [0 Inf] + +% offset +try + const = P(2); +catch + const = 0; +end + +% Behavioural prediction +% ========================================================================= + +% get idx of Q-value to use +if numel (x) == 2 + % if only two arms, directly takes the two Q-values + idx = [1 2]; +else + % otherwise, by default the last 2 inputs should provide the curent + % cues + idx = u(end-1:end); +end + +% Compute Value differential +dQ = x(idx(2))-x(idx(1)); + +% make prediction +gx = VBA_sigmoid(beta * dQ + const); + + +% Compute gradients +% ========================================================================= + +% w.r.t hidden state +% ------------------------------------------------------------------------- +dgdx = zeros (numel (x), 1); +dgdx(idx) = [-1 1] * beta * gx * (1 - gx); + + +% w.r.t parameters +% ------------------------------------------------------------------------- +dgdp = zeros (numel (P), 1); + +% temperature +dgdp(1) = beta * dQ * gx * (1 - gx); + +% offset +if numel (P) > 1 + dgdp(2) = gx * (1 - gx); +end + + diff --git a/subfunctions/g_RFX.m b/demos/_models/g_RFX.m similarity index 100% rename from subfunctions/g_RFX.m rename to demos/_models/g_RFX.m diff --git a/subfunctions/g_Udummy.m b/demos/_models/g_Udummy.m similarity index 100% rename from subfunctions/g_Udummy.m rename to demos/_models/g_Udummy.m diff --git a/subfunctions/g_VBvolatile0.m b/demos/_models/g_VBvolatile0.m similarity index 91% rename from subfunctions/g_VBvolatile0.m rename to demos/_models/g_VBvolatile0.m index ddb3e4ad..c6104b3f 100644 --- a/subfunctions/g_VBvolatile0.m +++ b/demos/_models/g_VBvolatile0.m @@ -31,7 +31,7 @@ error(['Invalid or missing response model specification: ', respmod]); end -p1 = sgm(x1,1); % E[P(R|a=1)] -p0 = sgm(x0,1); % E[P(R|a=0)] -gx = sgm((p1-p0)*exp(P(1))+P(2),1); % P(a=1) +p1 = VBA_sigmoid(x1); % E[P(R|a=1)] +p0 = VBA_sigmoid(x0); % E[P(R|a=0)] +gx = VBA_sigmoid((p1-p0)*exp(P(1))+P(2)); % P(a=1) diff --git a/subfunctions/g_classif.m b/demos/_models/g_classif.m similarity index 100% rename from subfunctions/g_classif.m rename to demos/_models/g_classif.m diff --git a/subfunctions/g_classif0.m b/demos/_models/g_classif0.m similarity index 93% rename from subfunctions/g_classif0.m rename to demos/_models/g_classif0.m index 2a604ad4..57e5b9b5 100644 --- a/subfunctions/g_classif0.m +++ b/demos/_models/g_classif0.m @@ -12,7 +12,7 @@ % - dgdp: gradient of E[y|P] wrt P. if in.sparse - [sP,dsdP] = sparseTransform(P,in.sparseP); + [sP, dsdP] = VBA_sparsifyPrior(P); else sP = P; end diff --git a/g_conv0.m b/demos/_models/g_conv0.m similarity index 93% rename from g_conv0.m rename to demos/_models/g_conv0.m index 27c050cb..2dd406da 100644 --- a/g_conv0.m +++ b/demos/_models/g_conv0.m @@ -17,11 +17,10 @@ % - dgdx: [useless] % - dgdp: the gradient of the system's output w.r.t. 
kernel parameters % SEE ALSO: g_convSig -try - K = in.K; -catch - K = 0; +if ~ isfield (in, 'K') + in.K = 0; end + try dgdp = in.dgdp; catch @@ -36,8 +35,7 @@ end end end -% dgdp = VBA_orth(dgdp',1)'; dgdp(end,:) = 1; end -g = dgdp'*P + K; +g = dgdp'*P + in.K; dgdx = []; diff --git a/g_convSig.m b/demos/_models/g_convSig.m similarity index 98% rename from g_convSig.m rename to demos/_models/g_convSig.m index 2d39bbed..d99d4931 100644 --- a/g_convSig.m +++ b/demos/_models/g_convSig.m @@ -21,5 +21,5 @@ % SEE ALSO: g_conv0 [g,dsdx,dgdp] = g_conv0(x,P,u,in); -sg = sig(g); +sg = VBA_sigmoid(g); dgdp = dgdp*diag(sg.*(1-sg)); \ No newline at end of file diff --git a/subfunctions/g_convSig_approx.m b/demos/_models/g_convSig_approx.m similarity index 97% rename from subfunctions/g_convSig_approx.m rename to demos/_models/g_convSig_approx.m index 9ecdfaab..99369e64 100644 --- a/subfunctions/g_convSig_approx.m +++ b/demos/_models/g_convSig_approx.m @@ -22,6 +22,7 @@ % [g,~,dgdp] = g_conv_approx(x,P,u,in); g = g_conv_approx(x,P,u,in); -sg = sigm(g,struct('mat',1)); +sg = VBA_sigmoid(g); + % dsdx = []; % dsdp = dgdp*diag(sg.*(1-sg)); \ No newline at end of file diff --git a/subfunctions/kernel_sinexp.m b/demos/_models/g_conv_approx.m similarity index 54% rename from subfunctions/kernel_sinexp.m rename to demos/_models/g_conv_approx.m index 96967e49..180c382b 100644 --- a/subfunctions/kernel_sinexp.m +++ b/demos/_models/g_conv_approx.m @@ -1,3 +1,36 @@ +function [gx] = g_conv_approx(~,P,u,in) + +gx = P(end); % constant as starting point +nt = round(size(u,1)./in.dim.nu); + +% dgdx = []; + +% persistent K; +% if size(K,1) ~= nt +% K = zeros(nt,nt); +% end + +% dgdp = zeros(size(P,1),nt); + + + +for i=1:in.dim.nu + Pi = P((i-1)*3+1:3*i); + kernel = kernel_sinexp(Pi(1),Pi(2),Pi(3),0:in.deltat:in.dim.n_t); + ui = u((i-1)*nt+1:i*nt); + ki = conv(ui,kernel); + gx = gx + ki(1:numel(ui)); +% for j=1:nt +% idx = j:min(j+in.dim.n_t-1,nt) ; +% K(idx,j) = kernel(1:min(in.dim.n_t,nt-j+1)) ; +% dgdp((i-1)*3+1:3*i,idx) = dgdp((i-1)*3+1:3*i,idx) + [ui(idx),ui(idx),ui(idx)]'.*dkdp(:,1:min(in.dim.n_t,nt-j+1)) ; +% end +% gx = gx + K*ui; +end +% dgdp(end,:) = 1; + +end + function [k,dkdp,landmarks,dadp]=kernel_sinexp(A,phi,beta,tList) % diff --git a/subfunctions/g_demo_extended.m b/demos/_models/g_demo_extended.m similarity index 100% rename from subfunctions/g_demo_extended.m rename to demos/_models/g_demo_extended.m diff --git a/subfunctions/g_demo_susceptibility.m b/demos/_models/g_demo_susceptibility.m similarity index 100% rename from subfunctions/g_demo_susceptibility.m rename to demos/_models/g_demo_susceptibility.m diff --git a/subfunctions/g_discounting.m b/demos/_models/g_discounting.m similarity index 87% rename from subfunctions/g_discounting.m rename to demos/_models/g_discounting.m index 65390bb1..6cb6b7d0 100755 --- a/subfunctions/g_discounting.m +++ b/demos/_models/g_discounting.m @@ -18,9 +18,9 @@ for i=1:2 [tmp,i1(i)] = min((in.grid1-t(i)).^2); [tmp,i2(i)] = min((in.grid2-R(i)).^2); - v(i) = vec(in.bf(i1(i),i2(i),:))'*P; + v(i) = VBA_vec(in.bf(i1(i),i2(i),:))'*P; end end dv = v(1) - v(2); b = exp(-P(in.ind.logb)); -gx = sigm(b.*dv); +gx = VBA_sigmoid(b.*dv); diff --git a/subfunctions/g_dummy.m b/demos/_models/g_dummy.m similarity index 100% rename from subfunctions/g_dummy.m rename to demos/_models/g_dummy.m diff --git a/g_embed.m b/demos/_models/g_embed.m similarity index 100% rename from g_embed.m rename to demos/_models/g_embed.m diff --git a/subfunctions/g_embedAR.m b/demos/_models/g_embedAR.m 
similarity index 100% rename from subfunctions/g_embedAR.m rename to demos/_models/g_embedAR.m diff --git a/subfunctions/g_exp.m b/demos/_models/g_exp.m similarity index 100% rename from subfunctions/g_exp.m rename to demos/_models/g_exp.m diff --git a/subfunctions/g_exp2d.m b/demos/_models/g_exp2d.m similarity index 100% rename from subfunctions/g_exp2d.m rename to demos/_models/g_exp2d.m diff --git a/subfunctions/g_fullDCM4fmri.m b/demos/_models/g_fullDCM4fmri.m similarity index 100% rename from subfunctions/g_fullDCM4fmri.m rename to demos/_models/g_fullDCM4fmri.m diff --git a/subfunctions/g_goNogo.m b/demos/_models/g_goNogo.m similarity index 100% rename from subfunctions/g_goNogo.m rename to demos/_models/g_goNogo.m diff --git a/subfunctions/g_ip.m b/demos/_models/g_ip.m similarity index 100% rename from subfunctions/g_ip.m rename to demos/_models/g_ip.m diff --git a/subfunctions/g_kToM.m b/demos/_models/g_kToM.m similarity index 100% rename from subfunctions/g_kToM.m rename to demos/_models/g_kToM.m diff --git a/subfunctions/g_logistic.m b/demos/_models/g_logistic.m similarity index 50% rename from subfunctions/g_logistic.m rename to demos/_models/g_logistic.m index 0d974bc8..2932e44f 100644 --- a/subfunctions/g_logistic.m +++ b/demos/_models/g_logistic.m @@ -1,13 +1,5 @@ function [g,dgdx,dgdP] = g_logistic(x,P,u,in) % derives the probability of outcome variable y=1, under the logistic model -g = sig(u'*P); +g = VBA_sigmoid(u'*P); dgdx = []; -dgdP = u*diag(g.*(1-g)); - - - -function s = sig(x) -s = 1./(1+exp(-x)); -% s(s<1e-4) = 1e-4; -% s(s>1-1e-4) = 1-1e-4; - +dgdP = u*diag(g.*(1-g)); \ No newline at end of file diff --git a/subfunctions/g_matmap.m b/demos/_models/g_matmap.m similarity index 100% rename from subfunctions/g_matmap.m rename to demos/_models/g_matmap.m diff --git a/subfunctions/g_metaToM.m b/demos/_models/g_metaToM.m similarity index 96% rename from subfunctions/g_metaToM.m rename to demos/_models/g_metaToM.m index a0f38b29..f1e69437 100644 --- a/subfunctions/g_metaToM.m +++ b/demos/_models/g_metaToM.m @@ -34,7 +34,7 @@ % 3- derive meta-ToM probabilistic decision P(a=1) -Pi = sigmoid(x(inG.meta.indx)); % prior P(agent=kToM) +Pi = VBA_sigmoid(x(inG.meta.indx)); % prior P(agent=kToM) gx = Pi*gx_ktom + (1-Pi)*gx_bsl; diff --git a/subfunctions/g_mixU.m b/demos/_models/g_mixU.m similarity index 100% rename from subfunctions/g_mixU.m rename to demos/_models/g_mixU.m diff --git a/subfunctions/g_nl0.m b/demos/_models/g_nl0.m similarity index 100% rename from subfunctions/g_nl0.m rename to demos/_models/g_nl0.m diff --git a/subfunctions/g_odds.m b/demos/_models/g_odds.m similarity index 100% rename from subfunctions/g_odds.m rename to demos/_models/g_odds.m diff --git a/subfunctions/g_odds2.m b/demos/_models/g_odds2.m similarity index 100% rename from subfunctions/g_odds2.m rename to demos/_models/g_odds2.m diff --git a/subfunctions/g_rbf.m b/demos/_models/g_rbf.m similarity index 100% rename from subfunctions/g_rbf.m rename to demos/_models/g_rbf.m diff --git a/subfunctions/g_rigid2D.m b/demos/_models/g_rigid2D.m similarity index 100% rename from subfunctions/g_rigid2D.m rename to demos/_models/g_rigid2D.m diff --git a/subfunctions/g_sig.m b/demos/_models/g_sig.m similarity index 100% rename from subfunctions/g_sig.m rename to demos/_models/g_sig.m diff --git a/subfunctions/g_sig_u.m b/demos/_models/g_sig_u.m similarity index 53% rename from subfunctions/g_sig_u.m rename to demos/_models/g_sig_u.m index bb5ab0d1..a47aca9c 100644 --- a/subfunctions/g_sig_u.m +++ 
b/demos/_models/g_sig_u.m @@ -1,3 +1,3 @@ function [Sx] = g_sig_u(x,Phi,u,in) % sigmoid observation mapping -[Sx] = sigm(u,struct('mat',1),Phi); +[Sx] = VBA_sigmoid(u,'slope',exp(Phi(1)),'center',Phi(2)); \ No newline at end of file diff --git a/demos/_models/g_sigm.m b/demos/_models/g_sigm.m new file mode 100644 index 00000000..aadeac25 --- /dev/null +++ b/demos/_models/g_sigm.m @@ -0,0 +1,14 @@ +function [Sx,dsdx,dsdp] = g_sigm(x,Phi,u,in) +% sigmoid observation mapping +try + in.x; +catch + in.x = 1; +end + +if in.x + [Sx,dsdx,dsdp] = VBA_sigmoid(x); +else + [Sx,dsdp,dsdx] = VBA_sigmoid(Phi); + dsdp = diag(dsdp); +end \ No newline at end of file diff --git a/demos/_models/g_sigm_binomial.m b/demos/_models/g_sigm_binomial.m new file mode 100644 index 00000000..05b184cd --- /dev/null +++ b/demos/_models/g_sigm_binomial.m @@ -0,0 +1,29 @@ +function [Sx,dsdx,dsdp] = g_sigm_binomial(x,Phi,u,in) +% evaluates the sigmoid function for binomial data analysis + +try + in.x; +catch + in.x = 0; +end + +if in.x % for learning effects (sigmoid parameters evolve over time) + [Sx, ~, dsdx] = VBA_sigmoid(u, ... + 'slope', exp(x(1)), ... + 'center', x(2), ... + 'derivatives', {'slope','center'}); + + dsdx(1,:) = dsdx(1,:) * exp(x(1)); + dsdp = []; + +else + [Sx, ~, dsdp] = VBA_sigmoid(u,... + 'slope', exp(Phi(1)), ... + 'center',Phi(2), ... + 'derivatives', {'slope','center'}); + + dsdp(1,:) = dsdp(1,:) * exp(Phi(1)); + dsdx = []; +end + + diff --git a/subfunctions/g_sigmoid.m b/demos/_models/g_sigmoid.m similarity index 65% rename from subfunctions/g_sigmoid.m rename to demos/_models/g_sigmoid.m index ade423e9..d3e808ad 100644 --- a/subfunctions/g_sigmoid.m +++ b/demos/_models/g_sigmoid.m @@ -2,34 +2,34 @@ % partially observable sigmoid mapping n = size(Xt,1); -try - ind1 = inG.ind; -catch - ind1 = 1:n; +if ~ isfield (inG, 'ind') + inG.ind = 1 : n; end + G = eye(n); -G = G(ind1,:); +G = G(inG.ind,:); -try +if size(Phi,1) >=1 G0 = G; G = Phi(1).*G; end -if size(Phi,1) >=2 - [Sx,dsdx,dsdp] = sigm(Xt,inG,Phi(2:end)); +if size(Phi,1) >=2 + [Sx,dsdx, dsdp] = VBA_sigmoid(Xt,inG,'slope',exp(Phi(2)),'derivatives',{'slope'}); + dsdp = dsdp * exp(Phi(2)); else - [Sx,dsdx] = sigm(Xt,inG); + [Sx,dsdx] = VBA_sigmoid(Xt,inG); end gx = G*Sx(:); dG_dX = [G*diag(dsdx(:))]'; -dG_dPhi = zeros(size(Phi,1),length(ind1)); +dG_dPhi = zeros(size(Phi,1),length(inG.ind)); if size(Phi,1) >=1 dG_dPhi(1,:) = [G0*Sx(:)]'; end if size(Phi,1) >=2 - dG_dPhi(2:end,:) = dsdp(:,ind1); + dG_dPhi(2:end,:) = dsdp(:,inG.ind); end diff --git a/subfunctions/g_softmax.m b/demos/_models/g_softmax.m similarity index 90% rename from subfunctions/g_softmax.m rename to demos/_models/g_softmax.m index 9cafd362..a5b7ae47 100644 --- a/subfunctions/g_softmax.m +++ b/demos/_models/g_softmax.m @@ -15,9 +15,9 @@ dQ = (x(1)-x(2)); if length(P)>1 - gx = sig( beta*dQ + P(2)); + gx = VBA_sigmoid( beta*dQ + P(2)); else - gx = sig( beta*dQ ); + gx = VBA_sigmoid( beta*dQ ); end dgdx = zeros(size(x,1),1); dgdx(1) = beta*gx*(1-gx); diff --git a/subfunctions/g_softmax4decoding.m b/demos/_models/g_softmax4decoding.m similarity index 100% rename from subfunctions/g_softmax4decoding.m rename to demos/_models/g_softmax4decoding.m diff --git a/subfunctions/g_ttest.m b/demos/_models/g_ttest.m similarity index 88% rename from subfunctions/g_ttest.m rename to demos/_models/g_ttest.m index 965007c1..c01b7e0e 100644 --- a/subfunctions/g_ttest.m +++ b/demos/_models/g_ttest.m @@ -5,7 +5,7 @@ sP = P; dsPdP = ones(numel(P),1); %%% sparse laplace priors on phi_2 -[sP(2),dsPdP(2)] = 
sparsify(P(2),log(2)); +[sP(2),dsPdP(2)] = VBA_sparsifyPrior (P(2)); % prediction @@ -13,7 +13,7 @@ sP(1) + sP(2) ]; % derivatives -dgdP = diag(dsPdP,0); +dgdP = diag(dsPdP); dgdP(1,2) = 1*dsPdP(1); dgdx = []; diff --git a/subfunctions/g_u2c.m b/demos/_models/g_u2c.m similarity index 64% rename from subfunctions/g_u2c.m rename to demos/_models/g_u2c.m index 5166c340..569a077d 100644 --- a/subfunctions/g_u2c.m +++ b/demos/_models/g_u2c.m @@ -4,9 +4,4 @@ dv = P(in.ind(:,1)) - P(in.ind(:,2)); % relative value of chosen item b = exp(P(in.temp)); % behavioural temperature -gx = sig(dv/b); % probability of picking the first item - -function s= sig(x) -s = 1./(1+exp(-x)); -s(s<1e-3) = 1e-3; -s(s>1-1e-3) = 1-1e-3; \ No newline at end of file +gx = VBA_sigmoid(dv/b, 'finite', 1e-3); % probability of picking the first item \ No newline at end of file diff --git a/subfunctions/g_u2p.m b/demos/_models/g_u2p.m similarity index 67% rename from subfunctions/g_u2p.m rename to demos/_models/g_u2p.m index 3bbc7169..419d4bd6 100644 --- a/subfunctions/g_u2p.m +++ b/demos/_models/g_u2p.m @@ -5,9 +5,4 @@ mu = x(1:in.n); % expectation (E[x] = mu) dv = mu(u(in.iu(1))) - mu(u(in.iu(2))); % relative value of item 1 b = exp(P(in.temp)); % behavioural temperature -gx = sig(dv/b); % probability of picking the first item - -function s= sig(x) -s = 1./(1+exp(-x)); -s(s<1e-3) = 1e-3; -s(s>1-1e-3) = 1-1e-3; \ No newline at end of file +gx = VBA_sigmoid(dv/b, 'finite', 1e-3); % probability of picking the first item diff --git a/demos/_models/g_vgo.m b/demos/_models/g_vgo.m new file mode 100644 index 00000000..edea4285 --- /dev/null +++ b/demos/_models/g_vgo.m @@ -0,0 +1,2 @@ +function [gx] = g_vgo(x,P,u,in) +gx = VBA_sigmoid(exp(P(1))*x+P(2)); \ No newline at end of file diff --git a/subfunctions/g_wrap_perseveration.m b/demos/_models/g_wrap_perseveration.m similarity index 90% rename from subfunctions/g_wrap_perseveration.m rename to demos/_models/g_wrap_perseveration.m index 455f6b51..de77d1fa 100644 --- a/subfunctions/g_wrap_perseveration.m +++ b/demos/_models/g_wrap_perseveration.m @@ -26,11 +26,11 @@ g0 = feval(in.g0,x,P0,u,in.in0); % P(y_t=1) without persevration lastMove = u(2); -if isweird(lastMove) +if VBA_isWeird (lastMove) gx = g0; % no perseveration else - dV = invsigmoid(g0); % V1-V0 + dV = VBA_sigmoid(g0, 'inverse', true); % V1-V0 beta = P(in.indbeta); % perseveration weight Bt = 2*lastMove - 1; % 1 if y_{t-1}=1 and -1 otherwise - gx = sigmoid(dV + beta.*Bt); % perseverative tendency + gx = VBA_sigmoid(dV + beta.*Bt); % perseverative tendency end diff --git a/subfunctions/h_Id.m b/demos/_models/h_Id.m similarity index 100% rename from subfunctions/h_Id.m rename to demos/_models/h_Id.m diff --git a/subfunctions/h_goNogo.m b/demos/_models/h_goNogo.m similarity index 100% rename from subfunctions/h_goNogo.m rename to demos/_models/h_goNogo.m diff --git a/subfunctions/h_randOutcome.m b/demos/_models/h_randOutcome.m similarity index 100% rename from subfunctions/h_randOutcome.m rename to demos/_models/h_randOutcome.m diff --git a/subfunctions/h_truefalse.m b/demos/_models/h_truefalse.m similarity index 100% rename from subfunctions/h_truefalse.m rename to demos/_models/h_truefalse.m diff --git a/subfunctions/h_whichItem.m b/demos/_models/h_whichItem.m similarity index 100% rename from subfunctions/h_whichItem.m rename to demos/_models/h_whichItem.m diff --git a/subfunctions/v_discounting.m b/demos/_models/v_discounting.m similarity index 97% rename from subfunctions/v_discounting.m rename to 
demos/_models/v_discounting.m index c663106b..50a343c6 100755 --- a/subfunctions/v_discounting.m +++ b/demos/_models/v_discounting.m @@ -38,6 +38,6 @@ case 'basis' [tmp,i1] = min((in.gx-t).^2); [tmp,i2] = min((in.gy-R).^2); - v = vec(in.bf(i1,i2,:))'*P; + v = VBA_vec(in.bf(i1,i2,:))'*P; end diff --git a/factorial_struct.m b/factorial_struct.m deleted file mode 100644 index 79cf1d14..00000000 --- a/factorial_struct.m +++ /dev/null @@ -1,63 +0,0 @@ -function structList = factorial_struct(varargin) -% FACTORIAL_STRUCT -% structList = factorial_struct('param1', {value1a, value1b,...}, 'param2', 1:n, ...) -% provide a structure with fields 'param1', 'param2', ... whose values are -% set in a factorial design. -% eg: -% -% structList(1).param1 = value1_a; -% structList(1).param2 = 1; -% -% structList(2).param1 = value1_a; -% structList(2).param2 = 2; -% ... -% -% structList(n).param1 = value1_a; -% structList(n).param2 = n; -% -% structList(n+1).param1 = value1_b; -% structList(n+1).param2 = 1; -% ... -% -% structList(2*n).param1 = value1_b; -% structList(2*n).param2 = n; -% - - % ________________________________________________________ - % initialize struct - structList = struct(); - - % ________________________________________________________ - % recusrive field adjonction - if ~isempty(varargin) - - % extract first variable name and potential values - argName = varargin{1}; - argValueList = varargin{2}; - - if ~iscell(argValueList) - try, argValueList = num2cell(argValueList); end - end - - % get structure for remaining parameters - subStructList = factorial_struct(varargin{3:end}); - - % iterate over potential value, - for iValue = 1:numel(argValueList) - [subStructList.(argName)] = deal(argValueList{iValue}); - structList = struct_cat(structList,subStructList); - end - - % sort field by calling order - structList = orderfields(structList,varargin(1:2:end)); - end - -end - -function s = struct_cat(s1,s2) - try - s = [s1 , s2]; - catch - s=s2; - end -end diff --git a/getF.m b/getF.m deleted file mode 100644 index 992ea7cd..00000000 --- a/getF.m +++ /dev/null @@ -1,18 +0,0 @@ -function [F,FH0] = getF(posterior,out,Laplace) -% Free Energy lower bound on log model evidence -% function [F,FH0] = getF(posterior,out,Laplace) -% IN: -% - posterior: the 'posterior' structure after VB inversion -% - out: the 'out' structure after VB inversion -% - Laplace: switch variable for the Laplace Free Energy approximation -% OUT: -% - F: the Free Energy lower bound on the log model evidence -% - FH0: the log evidence under the null - -if nargin == 3 - out.options.Laplace = Laplace; -end - -[F] = VBA_FreeEnergy(posterior,out.suffStat,out.options); - -[FH0] = VBA_LMEH0(out.y,out.options); \ No newline at end of file diff --git a/getHyperpriors.m b/getHyperpriors.m deleted file mode 100644 index ec33a00a..00000000 --- a/getHyperpriors.m +++ /dev/null @@ -1,48 +0,0 @@ -function [a, b]=getHyperpriors(y,p_min,p_max) -%% compute hyperpriors parameters given you want to eplain between p_min -% and p_max fraction of the total variance. -%% ex. 
-% [a, b]=getHyperpriors(200,.1,.9); -% figure; -% r=random('gam',a,1/b,1,5000); -% hist(r,50); -% hold on -% plot([prec_min, prec_max],[100 100],'r'); - -%% check parameters -if nargin==1 - p_min=.1; - p_max=.9; -elseif nargin==3 - if any([p_min, p_max]<0) || any([p_min, p_max]>1) - error('*** getHyperpriors: p_min and p_max should be between 0 and 1.'); - end - if p_min > p_max - error('*** getHyperpriors: p_min should be inferior to p_max.'); - end -else - error('*** getHyperpriors: wrong number of argument'); -end - -p_max = min(p_max, 1-eps); - -if numel(y) == 1 % retrocompatibility - variance = y; - warning('getHyperpriors: first argument should be a data vector'); -else - y(isnan(y)) = []; - variance = var(vec(y)); -end - -% residual variance -var_min = variance*(1-p_max); -var_max = variance*(1-p_min); - -% expressed as precision -prec_min = 1/var_max; -prec_max = 1/var_min; - -% approximate 98% confidence interval -a = 6*((prec_min+prec_max)^2)/((prec_max-prec_min)^2); -b = 12*(prec_min+prec_max)/((prec_max-prec_min)^2); - diff --git a/getKernels.m b/getKernels.m deleted file mode 100644 index c77dc878..00000000 --- a/getKernels.m +++ /dev/null @@ -1,85 +0,0 @@ -function [H1,K1,tgrid] = getKernels(posterior,out,dcm) - -% This function derives the response kernels of the system -% function [H1,K1,tgrid] = getKernels(posterior,out,dcm) -% IN: -% - posterior,out: the output of the system inversion -% - dcm: flag for dcm (does not compute hemodynamic states kernels) -% OUT: -% - H1: the (pxtxnu) output impulse response function, where p is the -% dimension of the data, t is the number of time samples and nu is the -% number of inputs to the system -% - K1: the (nxtxnu) state impulse response function, where n is the -% number of states. NB: for DCM models, this is the neural impulse -% response function... -% - tgrid: the time grid over which the kernels are estimated - -if isequal(out.dim.n_t,1) ... - || out.dim.u < 1 ... - || isempty(out.options.f_fname) ... - % not a dynamical system - H1 = []; - K1 = []; - tgrid = []; - return -end - -if nargin <3 - dcm = 0; -else - dcm = ~~dcm; -end - -nu = out.dim.u; - -n = out.dim.n; -p = out.dim.p; -out.options.microU = 1; % for impulse response functions... -if dcm - % remove confounds,... - out.options.inG.confounds.X0 = []; - % only look at neuronal states,... 
- n = p; %size(out.options.inF.C,1); - % and get kernels over 16 secs - TR = out.options.inF.deltat*out.options.decim; - out.options.dim.n_t = ceil(16./TR); -end -nt = out.options.dim.n_t*out.options.decim; - -% ensure steady state initial conditions and throw away state noise -% estimate -posterior.muX0 = zeros(size(posterior.muX0)); -out.suffStat.dx = []; - -% pre-allocate response kernels -H1 = zeros(p,nt,nu); -K1 = zeros(n,nt,nu); - -% derive kernels by integrating the system -gotit = 0; -for i=1:nu - try - U = zeros(nu,nt); - U(i,1) = 1; - % get output impulse response - [x,gx,tgrid] = VBA_microTime(posterior,U,out); - H1(:,:,i) = gx(:,2:end); - if dcm && isfield(out.options.inF,'n5') - K1(:,:,i) = x(out.options.inF.n5,2:end); - else - K1(:,:,i) = x(:,2:end); - end - gotit = 1; - end -end -% clean up kernels -if ~gotit - H1 = []; - K1 = []; - tgrid = []; -else - tgrid = tgrid(2:end); - K1(abs(K1)<=1e-8) = 0; - H1(abs(H1)<=1e-8) = 0; -end - diff --git a/isbinary.m b/isbinary.m deleted file mode 100644 index 0612896b..00000000 --- a/isbinary.m +++ /dev/null @@ -1,29 +0,0 @@ -function flag = isbinary(X) -% true if X contains only 0 or 1 entries -% function [flag] = isbinary(X) -% IN: -% - X: N-D matrix (or cell array of matrices) to be checked -% OUT: -% - flag: 1 id X is binary, 0 if not -if iscell(X) - ok = 1; - for i=1:numel(X) - ok = ok & isbinary(X{i}); - end - flag = ok; -elseif isstruct(X) - ok = 1; - fn = fieldnames(X); - for i=1:length(fn) - ok = ok & isbinary(getfield(X,fn{i})); - end - flag = ok; -elseif isnumeric(X) || islogical(X) - flag = 0; - if all(ismember(X(:),[0,1])) - flag = 1; - end -else - flag = 0; -end - diff --git a/isweird.m b/isweird.m deleted file mode 100644 index db567ff6..00000000 --- a/isweird.m +++ /dev/null @@ -1,31 +0,0 @@ -function [flag] = isweird(X) -% true if matrix X contains any Infs, NaNs or non real entries -% function [flag] = isweird(X) -% IN: -% - X: N-D matrix (or cell array of matrices) to be checked -% OUT: -% - flag: 1 id X is weird, 0 if not, -1 if not numeric (e.g. string) - -if iscell(X) -% ok = 1; -% for i=1:numel(X) -% ok = ok & ~isweird(X{i}); -% end -% flag = ~ok; - flag = any(cell2mat(cellfun(@isweird,X,'UniformOutput',false))); -elseif isstruct(X) - ok = 1; - fn = fieldnames(X); - for i=1:length(fn) - ok = ok & ~isweird(getfield(X,fn{i})); - end - flag = 1*(~ok); -elseif isnumeric(X) || islogical(X) - flag = 0; - if any(isinf(X(:)) | isnan(X(:)) | ~isreal(X(:))) - flag = 1; - end -else - flag = -1; -end - diff --git a/legacy/GaussNewton.m b/legacy/GaussNewton.m new file mode 100644 index 00000000..a7fe5168 --- /dev/null +++ b/legacy/GaussNewton.m @@ -0,0 +1,13 @@ +function [opt,sigma,out] = GaussNewton(fname,init,options) +% legacy code +s = warning ('on'); +warning ('*** The function `GaussNewton` is now deprecated and has beend renamed `VBA_GaussNewton`.') +warning (s); + +% fallback +try + options; +catch + options = struct; +end +[opt,sigma,out] = VBA_GaussNewton(fname,init,options); \ No newline at end of file diff --git a/legacy/README.md b/legacy/README.md new file mode 100644 index 00000000..fed43f85 --- /dev/null +++ b/legacy/README.md @@ -0,0 +1,5 @@ +# Legacy code + +This folder contains deprecated functions. To ensure backward compatibility, calling any of those functions will try to fallback to a replacement function, or produce an error if no alternative can be found. + +In any case, those functions also issue some warning to help adapting user code. 
diff --git a/legacy/VBA_getISqrtMat.m b/legacy/VBA_getISqrtMat.m new file mode 100644 index 00000000..a0fa7fe4 --- /dev/null +++ b/legacy/VBA_getISqrtMat.m @@ -0,0 +1,11 @@ +function S = VBA_getISqrtMat(C,inv) +% legacy code +s = warning ('on'); +warning ('*** The function `VBA_getISqrtMat` is now deprecated. Please see `VBA_sqrtm` for an alternative.') +warning (s); + +% fallback +if nargin < 2 + inv = true; +end +S = VBA_sqrtm (C, inv); % assumes VBA_sqrtm accepts the same inverse flag \ No newline at end of file diff --git a/VBA_groupBMCbtw.m b/legacy/VBA_groupBMCbtw.m similarity index 100% rename from VBA_groupBMCbtw.m rename to legacy/VBA_groupBMCbtw.m diff --git a/legacy/VBA_sample.m b/legacy/VBA_sample.m new file mode 100644 index 00000000..b67243eb --- /dev/null +++ b/legacy/VBA_sample.m @@ -0,0 +1,36 @@ +function y = VBA_sample(form,suffStat,N) +% legacy code +s = warning ('on'); +warning ('*** The function `VBA_sample` is now deprecated. Please see `VBA_random` for an alternative.') +warning (s); + +if nargin < 3 + N = 1; +end + +switch form + case 'gaussian' + if isscalar(suffStat.mu) + N = {1, N}; + else + N = {N}; + end + + y = VBA_random ('Gaussian', suffStat.mu, suffStat.Sigma, N{:}); + + case 'gamma' + y = VBA_random ('Gamma', suffStat.a, suffStat.b, 1, N); + + case 'dirichlet' + y = VBA_random ('Dirichlet', suffStat.d, N); + + case 'bernoulli' + y = VBA_random ('Bernoulli', suffStat.p, 1, N); + + case 'binomial' + y = VBA_random ('Binomial', suffStat.n, suffStat.p, 1, N); + + case 'multinomial' + y = VBA_random ('Multinomial', suffStat.n, suffStat.p, N); + +end diff --git a/legacy/checkGX_binomial.m b/legacy/checkGX_binomial.m new file mode 100644 index 00000000..53b432ae --- /dev/null +++ b/legacy/checkGX_binomial.m @@ -0,0 +1,8 @@ +function x = checkGX_binomial (x, lim) +% legacy code +s = warning ('on'); +warning ('*** The function `checkGX_binomial` is now deprecated and has been renamed `VBA_finiteBinomial` (same syntax).') +warning (s); + +% fallback +x = VBA_finiteBinomial (x, lim); diff --git a/legacy/cov2corr.m b/legacy/cov2corr.m new file mode 100644 index 00000000..86a20576 --- /dev/null +++ b/legacy/cov2corr.m @@ -0,0 +1,8 @@ +function y = cov2corr(x) +% legacy code +s = warning ('on'); +warning ('*** The function `cov2corr` is now deprecated. Please see `VBA_cov2corr` (same syntax).') +warning (s); + +% fallback +y = VBA_cov2corr (x); \ No newline at end of file diff --git a/legacy/empiricalHist.m b/legacy/empiricalHist.m new file mode 100644 index 00000000..1ce34b47 --- /dev/null +++ b/legacy/empiricalHist.m @@ -0,0 +1,8 @@ +function [py, gridy] = empiricalHist (y, pr) +% legacy code +s = warning ('on'); +warning ('*** The function `empiricalHist` is now deprecated and has been renamed `VBA_empiricalDensity`.') +warning (s); + +% fallback +[py, gridy] = VBA_empiricalDensity (y, pr); \ No newline at end of file diff --git a/legacy/getHyperpriors.m b/legacy/getHyperpriors.m new file mode 100644 index 00000000..4f9527e4 --- /dev/null +++ b/legacy/getHyperpriors.m @@ -0,0 +1,8 @@ +function [a, b] = getHyperpriors(y, p_min, p_max) +% legacy code +s = warning ('on'); +warning ('*** The function `getHyperpriors` is now deprecated.
Please see `VBA_guessHyperpriors` for an alternative.') +warning (s); + +% fallback +[a, b] = VBA_guessHyperpriors (y, [p_min, p_max]); \ No newline at end of file diff --git a/legacy/getSubplots.m b/legacy/getSubplots.m new file mode 100644 index 00000000..2c9c470e --- /dev/null +++ b/legacy/getSubplots.m @@ -0,0 +1,8 @@ +function getSubplots (ha, style) +% legacy code +s = warning ('on'); +warning ('*** The function `getSubplots` is now deprecated and has been renamed `VBA_getSubplots` (same syntax).') +warning (s); + +% fallback +VBA_getSubplots (ha, style); \ No newline at end of file diff --git a/legacy/get_MCMC_predictiveDensity.m b/legacy/get_MCMC_predictiveDensity.m new file mode 100644 index 00000000..26114c4e --- /dev/null +++ b/legacy/get_MCMC_predictiveDensity.m @@ -0,0 +1,24 @@ +function [pX,gX,pY,gY,X,Y] = get_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np,lx,ly) +% legacy code +s = warning ('on'); +warning ('*** The function `get_MCMC_predictiveDensity` is now deprecated. Please use `VBA_MCMC_predictiveDensity` instead (same syntax).') +warning (s); + +% fallback +switch nargin + case 10 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np,lx,ly); + case 9 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np,lx); + case 8 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N,np); + case 7 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim,N); + case 6 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity(f_fname,g_fname,u,n_t,options,dim); + otherwise + error('VBA_MCMC_predictiveDensity: wrong number of arguments'); +end + + + diff --git a/legacy/get_MCMC_predictiveDensity_fb.m b/legacy/get_MCMC_predictiveDensity_fb.m new file mode 100644 index 00000000..21625302 --- /dev/null +++ b/legacy/get_MCMC_predictiveDensity_fb.m @@ -0,0 +1,25 @@ +function [pX,gX,pY,gY,X,Y,U] = get_MCMC_predictiveDensity_fb(f_fname,g_fname,u,n_t,options,dim,fb,N,np,lx,ly) + +% legacy code +s = warning ('on'); +warning ('*** The function `get_MCMC_predictiveDensity_fb` is now deprecated. Please use `VBA_MCMC_predictiveDensity_fb` instead (same syntax).') +warning (s); + +% fallback +switch nargin + case 11 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity_fb(f_fname,g_fname,u,n_t,options,dim,fb,N,np,lx,ly); + case 10 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity_fb(f_fname,g_fname,u,n_t,options,dim,fb,N,np,lx); + case 9 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity_fb(f_fname,g_fname,u,n_t,options,dim,fb,N,np); + case 8 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity_fb(f_fname,g_fname,u,n_t,options,dim,fb,N); + case 7 + [pX,gX,pY,gY,X,Y] = VBA_MCMC_predictiveDensity_fb(f_fname,g_fname,u,n_t,options,dim,fb); + otherwise + error('VBA_MCMC_predictiveDensity_fb: wrong number of arguments'); +end + + + diff --git a/subfunctions/invSparsify.m b/legacy/invSparsify.m similarity index 100% rename from subfunctions/invSparsify.m rename to legacy/invSparsify.m diff --git a/legacy/invsigmoid.m b/legacy/invsigmoid.m new file mode 100644 index 00000000..6b7e3659 --- /dev/null +++ b/legacy/invsigmoid.m @@ -0,0 +1,8 @@ +function y = invsigmoid(x) +% legacy code +s = warning ('on'); +warning ('*** The function `invsigmoid` is now deprecated. 
Please see `VBA_sigmoid` for an alternative.') +warning (s); + +% fallback +y = VBA_sigmoid (x, 'inverse', true); \ No newline at end of file diff --git a/legacy/isbinary.m b/legacy/isbinary.m new file mode 100644 index 00000000..2e98f9dc --- /dev/null +++ b/legacy/isbinary.m @@ -0,0 +1,8 @@ +function flag = isbinary (X) +% legacy code +s = warning ('on'); +warning ('*** The function `isbinary` is now deprecated and has been renamed `VBA_isBinary` (same syntax).') +warning (s); + +% fallback +flag = VBA_isBinary (X); \ No newline at end of file diff --git a/legacy/isweird.m b/legacy/isweird.m new file mode 100644 index 00000000..096149d2 --- /dev/null +++ b/legacy/isweird.m @@ -0,0 +1,8 @@ +function [flag] = isweird (X) +% legacy code +s = warning ('on'); +warning ('*** The function `isweird` is now deprecated and has been renamed `VBA_isWeird` (same syntax).') +warning (s); + +% fallback +flag = VBA_isWeird (X); diff --git a/legacy/iswithin.m b/legacy/iswithin.m new file mode 100644 index 00000000..5ab93a2a --- /dev/null +++ b/legacy/iswithin.m @@ -0,0 +1,8 @@ +function [flag] = iswithin (X, bounds) +% legacy code +s = warning ('on'); +warning ('*** The function `iswithin` is now deprecated and has been renamed `VBA_isInRange` (same syntax).') +warning (s); + +% fallback +flag = VBA_isInRange (X, bounds); diff --git a/legacy/numericDiff.m b/legacy/numericDiff.m new file mode 100644 index 00000000..9a770e4f --- /dev/null +++ b/legacy/numericDiff.m @@ -0,0 +1,8 @@ +function [dfdx, fx] = numericDiff(fName, idxArg2Diff, varargin) +% legacy code +s = warning ('on'); +warning ('*** The function `numericDiff` is now deprecated. Please use `VBA_numericDiff` (same syntax).') +warning (s); + +% fallback +[dfdx, fx] = VBA_numericDiff(fName, idxArg2Diff, varargin{:}); \ No newline at end of file diff --git a/legacy/sampleFromArbitraryP.m b/legacy/sampleFromArbitraryP.m new file mode 100644 index 00000000..28ff4543 --- /dev/null +++ b/legacy/sampleFromArbitraryP.m @@ -0,0 +1,17 @@ +function [X] = sampleFromArbitraryP (p, gridX ,N) +% legacy code +s = warning ('on'); +warning ('*** The function `sampleFromArbitraryP` is now deprecated. Please see `VBA_random` for an alternative.') +warning (s); + +% fallback +if nargin < 3 + N = 1; +end + +if isvector (gridX) + N = {N, 1}; +else + N = {N}; +end +X = VBA_random ('Arbitrary', p, gridX, N{:}); \ No newline at end of file diff --git a/legacy/sgm.m b/legacy/sgm.m new file mode 100644 index 00000000..6af92ded --- /dev/null +++ b/legacy/sgm.m @@ -0,0 +1,11 @@ +function y = sgm(x,a) +% legacy code +s = warning ('on'); +warning ('*** The function `sgm` is now deprecated. Please see `VBA_sigmoid` for an alternative.') +warning (s); + +% fallback +if nargin == 1 + a = 1; +end +y = VBA_sigmoid (x, 'scale', a); diff --git a/legacy/sig.m b/legacy/sig.m new file mode 100644 index 00000000..a14786a2 --- /dev/null +++ b/legacy/sig.m @@ -0,0 +1,8 @@ +function y = sig(x) +% legacy code +s = warning ('on'); +warning ('*** The function `sig` is now deprecated. Please see `VBA_sigmoid` for an alternative.') +warning (s); + +% fallback +y = VBA_sigmoid (x); \ No newline at end of file diff --git a/legacy/sigm.m b/legacy/sigm.m new file mode 100755 index 00000000..8e0093cc --- /dev/null +++ b/legacy/sigm.m @@ -0,0 +1,41 @@ +function [Sx,dsdx,dsdp] = sigm(x,in,Phi) + +% legacy code +s = warning ('on'); +warning ('*** The function `sigm` is now deprecated.
Please see `VBA_sigmoid` for an alternative.') +warning (s); + +% fallback +if nargin < 2 + in = struct (); +end + +if nargin < 3 + Phi = []; +end + +if isfield(in, 'G0') + in.scale = in.G0; + in = rmfield(in,'G0'); +end + +if isfield(in, 'S0') + in.offset = in.S0; + in = rmfield(in,'S0'); +end + +if numel(Phi) > 0 + in.slope = exp(Phi(1)); + in.derivatives = {'slope'}; +end + +if numel(Phi) > 1 + in.center = Phi(2); + in.derivatives = {'slope', 'center'}; +end + +[Sx,dsdx,dsdp] = VBA_sigmoid (x, in); + +if numel(Phi) > 0 + dsdp(1,:) = dsdp(1,:) * exp(Phi(1)); +end diff --git a/legacy/sigmoid.m b/legacy/sigmoid.m new file mode 100644 index 00000000..3cf74860 --- /dev/null +++ b/legacy/sigmoid.m @@ -0,0 +1,8 @@ +function y = sigmoid(x) +% legacy code +s = warning ('on'); +warning ('*** The function `sigmoid` is now deprecated. Please see `VBA_sigmoid` for an alternative.') +warning (s); + +% fallback +y = VBA_sigmoid (x); \ No newline at end of file diff --git a/legacy/simulateNLSS.m b/legacy/simulateNLSS.m new file mode 100644 index 00000000..6cc63d85 --- /dev/null +++ b/legacy/simulateNLSS.m @@ -0,0 +1,12 @@ +function [y,x,x0,eta,e,u] = simulateNLSS(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0) +% legacy code +s = warning ('on'); +warning ('*** The function `simulateNLSS` is now deprecated. Please use `VBA_simulate` instead (same syntax).') +warning (s); + +% fallback +try + [y,x,x0,eta,e,u] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0); +catch + [y,x,x0,eta,e,u] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options); +end \ No newline at end of file diff --git a/legacy/simulateNLSS_fb.m b/legacy/simulateNLSS_fb.m new file mode 100644 index 00000000..a058398d --- /dev/null +++ b/legacy/simulateNLSS_fb.m @@ -0,0 +1,8 @@ +function [y,x,x0,eta,e,u] = simulateNLSS_fb(n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0,fb) +% legacy code +s = warning ('on'); +warning ('*** The function `simulateNLSS_fb` is now deprecated. Please use `VBA_simulate` instead (same syntax).') +warning (s); + +% fallback +[y,x,x0,eta,e,u] = VBA_simulate (n_t,f_fname,g_fname,theta,phi,u,alpha,sigma,options,x0,fb); diff --git a/legacy/sparseTransform.m b/legacy/sparseTransform.m new file mode 100644 index 00000000..bce60be2 --- /dev/null +++ b/legacy/sparseTransform.m @@ -0,0 +1,8 @@ +function [sx,dsdx] = sparseTransform(x,P) +% legacy code +s = warning ('on'); +warning ('*** The function `sparseTransform` is now deprecated. Please see `VBA_sparsifyPrior` for an alternative.') +warning (s); + +% fallback +[sx,dsdx] = VBA_sparsifyPrior(x, log(2), 1/P); \ No newline at end of file diff --git a/legacy/sparsify.m b/legacy/sparsify.m new file mode 100644 index 00000000..b5b1dd33 --- /dev/null +++ b/legacy/sparsify.m @@ -0,0 +1,8 @@ +function [sx,dsdx,dsdP] = sparsify(x,P) +% legacy code +s = warning ('on'); +warning ('*** The function `sparsify` is now deprecated.
Please see `VBA_sparsifyPrior` for an alternative.') +warning (s); + +% fallback +[sx,dsdx,dsdP] = VBA_sparsifyPrior(x, P); \ No newline at end of file diff --git a/subfunctions/ERP_dcm.m b/legacy/trashbin/ERP_dcm.m similarity index 100% rename from subfunctions/ERP_dcm.m rename to legacy/trashbin/ERP_dcm.m diff --git a/subfunctions/compare_struct.m b/legacy/trashbin/compare_struct.m similarity index 100% rename from subfunctions/compare_struct.m rename to legacy/trashbin/compare_struct.m diff --git a/subfunctions/create2dbf.m b/legacy/trashbin/create2dbf.m similarity index 100% rename from subfunctions/create2dbf.m rename to legacy/trashbin/create2dbf.m diff --git a/subfunctions/extractFIR.m b/legacy/trashbin/extractFIR.m similarity index 60% rename from subfunctions/extractFIR.m rename to legacy/trashbin/extractFIR.m index 3daf4fb2..e8f34e10 100644 --- a/subfunctions/extractFIR.m +++ b/legacy/trashbin/extractFIR.m @@ -1,3 +1,3 @@ function out = extractFIR(x,n) -out = reshape(vec(x)',n,[]); +out = reshape(VBA_vec(x)',n,[]); out = mean(out,2)'; diff --git a/subfunctions/getFamily.m b/legacy/trashbin/getFamily.m similarity index 100% rename from subfunctions/getFamily.m rename to legacy/trashbin/getFamily.m diff --git a/subfunctions/get_ARcov.m b/legacy/trashbin/get_ARcov.m similarity index 100% rename from subfunctions/get_ARcov.m rename to legacy/trashbin/get_ARcov.m diff --git a/subfunctions/gridL_binomial.m b/legacy/trashbin/gridL_binomial.m similarity index 97% rename from subfunctions/gridL_binomial.m rename to legacy/trashbin/gridL_binomial.m index 4a37781f..22ecc4cd 100644 --- a/subfunctions/gridL_binomial.m +++ b/legacy/trashbin/gridL_binomial.m @@ -35,7 +35,8 @@ end end -[iy,ix,Max] = maxMat(logP); +[~, iy, ix] = VBA_maxMat(logP); + bestPhi = [gridPhi1(iy),gridPhi2(ix)]; if options.verbose diff --git a/stats&plots/hatch.m b/legacy/trashbin/hatch.m similarity index 100% rename from stats&plots/hatch.m rename to legacy/trashbin/hatch.m diff --git a/subfunctions/isHrfStable.m b/legacy/trashbin/isHrfStable.m similarity index 100% rename from subfunctions/isHrfStable.m rename to legacy/trashbin/isHrfStable.m diff --git a/subfunctions/logExp.m b/legacy/trashbin/logExp.m similarity index 100% rename from subfunctions/logExp.m rename to legacy/trashbin/logExp.m diff --git a/stats&plots/smooth2.m b/legacy/trashbin/smooth2.m similarity index 100% rename from stats&plots/smooth2.m rename to legacy/trashbin/smooth2.m diff --git a/legacy/vec.m b/legacy/vec.m new file mode 100644 index 00000000..fcf4b0bd --- /dev/null +++ b/legacy/vec.m @@ -0,0 +1,8 @@ +function vx = vec(X) +% legacy code +s = warning ('on'); +warning ('*** The function `vec` is now deprecated and has beend renamed `VBA_vec`.') +warning (s); + +% fallback +vx = VBA_vec (X); \ No newline at end of file diff --git a/subfunctions/BOLD_parameters.m b/modules/DCM/BOLD_parameters.m similarity index 100% rename from subfunctions/BOLD_parameters.m rename to modules/DCM/BOLD_parameters.m diff --git a/subfunctions/addConfounds2dcm.m b/modules/DCM/addConfounds2dcm.m similarity index 100% rename from subfunctions/addConfounds2dcm.m rename to modules/DCM/addConfounds2dcm.m diff --git a/subfunctions/dcm2vba.m b/modules/DCM/dcm2vba.m similarity index 100% rename from subfunctions/dcm2vba.m rename to modules/DCM/dcm2vba.m diff --git a/subfunctions/defaultHRFparams.m b/modules/DCM/defaultHRFparams.m similarity index 100% rename from subfunctions/defaultHRFparams.m rename to modules/DCM/defaultHRFparams.m diff --git a/subfunctions/extend_dcm.m 
b/modules/DCM/extend_dcm.m similarity index 100% rename from subfunctions/extend_dcm.m rename to modules/DCM/extend_dcm.m diff --git a/subfunctions/getOptions4dcm.m b/modules/DCM/getOptions4dcm.m similarity index 98% rename from subfunctions/getOptions4dcm.m rename to modules/DCM/getOptions4dcm.m index b2cc94ab..8c24fb6e 100644 --- a/subfunctions/getOptions4dcm.m +++ b/modules/DCM/getOptions4dcm.m @@ -77,3 +77,5 @@ options.priors = priors; options.updateHP = 1; options.inF.linearized = lin; + +options.sources = struct('type', 0, 'out', 1:dim.p); diff --git a/subfunctions/getPriors.m b/modules/DCM/getPriors.m similarity index 82% rename from subfunctions/getPriors.m rename to modules/DCM/getPriors.m index a89d0a73..80274181 100644 --- a/subfunctions/getPriors.m +++ b/modules/DCM/getPriors.m @@ -95,34 +95,29 @@ if extended dq(options.inF.r) = 100; end - priors.iQx{t} = diag(dq); -% dq = [ones(1,nreg)]; -% priors.iQy{t,1} = diag(dq); + priors.iQx{t} = diag(dq); end + % muxer -if isfield(options,'sources') - g_source = [options.sources(:).type]==0; - n_sources = numel(options.sources(g_source)); - dim_y = [options.sources(g_source).out]; -else - n_sources=1; - dim_y=nreg; -end -for n=1:n_sources % default variance for other sources - try - prec=options.sources(n).prec; - catch - prec=1; +try + gsi = find ([options.sources.type] == 0); + n_Gsources = numel(gsi); + for n=1:n_Gsources + for t = 1 : n_t + dim_y = numel (options.sources(gsi(n)).out); + priors.iQy{t,n} = eye(dim_y); + end end - for t = 1:n_t - dq = [ones(1,numel(dim_y(n)))]; - priors.iQy{t,n} = diag(dq); +catch + n_Gsources = 1; + for t = 1 : n_t + priors.iQy{t,1} = eye(nreg); end end %= precision hyperparameters -priors.a_sigma = 1e0*ones(n_sources,1); -priors.b_sigma = 1e0*ones(n_sources,1); +priors.a_sigma = 1e0*ones(n_Gsources,1); +priors.b_sigma = 1e0*ones(n_Gsources,1); if ~stochastic priors.a_alpha = Inf; priors.b_alpha = 0; diff --git a/subfunctions/get_HRFparams.m b/modules/DCM/get_HRFparams.m similarity index 100% rename from subfunctions/get_HRFparams.m rename to modules/DCM/get_HRFparams.m diff --git a/subfunctions/get_U_basis.m b/modules/DCM/get_U_basis.m similarity index 96% rename from subfunctions/get_U_basis.m rename to modules/DCM/get_U_basis.m index 92642221..85f9a4b7 100644 --- a/subfunctions/get_U_basis.m +++ b/modules/DCM/get_U_basis.m @@ -39,5 +39,5 @@ u = []; return end - [u] = feval(u_fname,[],eye(n),ut,in); + [u] = u_fname([],eye(n),ut,in); end \ No newline at end of file diff --git a/subfunctions/prepare_dcm.m b/modules/DCM/prepare_dcm.m similarity index 100% rename from subfunctions/prepare_dcm.m rename to modules/DCM/prepare_dcm.m diff --git a/subfunctions/prepare_fullDCM.m b/modules/DCM/prepare_fullDCM.m similarity index 100% rename from subfunctions/prepare_fullDCM.m rename to modules/DCM/prepare_fullDCM.m diff --git a/subfunctions/u_Fourier.m b/modules/DCM/u_Fourier.m similarity index 100% rename from subfunctions/u_Fourier.m rename to modules/DCM/u_Fourier.m diff --git a/subfunctions/u_FourierComplete.m b/modules/DCM/u_FourierComplete.m similarity index 100% rename from subfunctions/u_FourierComplete.m rename to modules/DCM/u_FourierComplete.m diff --git a/subfunctions/u_GaussianBumps.m b/modules/DCM/u_GaussianBumps.m similarity index 100% rename from subfunctions/u_GaussianBumps.m rename to modules/DCM/u_GaussianBumps.m diff --git a/subfunctions/u_RBF.m b/modules/DCM/u_RBF.m similarity index 100% rename from subfunctions/u_RBF.m rename to modules/DCM/u_RBF.m diff --git a/subfunctions/vba2dcm.m 
b/modules/DCM/vba2dcm.m similarity index 99% rename from subfunctions/vba2dcm.m rename to modules/DCM/vba2dcm.m index 11a64c7b..05709f74 100644 --- a/subfunctions/vba2dcm.m +++ b/modules/DCM/vba2dcm.m @@ -101,7 +101,7 @@ try kernels = out.diagnostics.kernels; catch - [kernels] = VBA_VolterraKernels(posterior,out,ceil(32/TR)); + [kernels] = VBA_getVolterraKernels(posterior,out,ceil(32/TR)); end DCM.H1 = kernels.g.m; DCM.K1 = kernels.x.m(out.options.inF.n5,:,:); diff --git a/stats&plots/Contrast_MEbins.m b/modules/GLM/Contrast_MEbins.m similarity index 100% rename from stats&plots/Contrast_MEbins.m rename to modules/GLM/Contrast_MEbins.m diff --git a/stats&plots/GLM_contrast.m b/modules/GLM/GLM_contrast.m similarity index 97% rename from stats&plots/GLM_contrast.m rename to modules/GLM/GLM_contrast.m index 57fdd0df..f78f3828 100644 --- a/stats&plots/GLM_contrast.m +++ b/modules/GLM/GLM_contrast.m @@ -69,7 +69,7 @@ % check basic numerical requirements try - if isweird(y) + if VBA_isWeird (y) disp('Error: data contains weird values!') return end @@ -139,8 +139,8 @@ SS_tot = sum((y(:,i)-mean(y(:,i))).^2); SS_err = sum(e(:,i).^2); R2(i) = 1-(SS_err/SS_tot); - R2_a(i) = FtoR2(stat(i).^2,1,df); - [tmp,ks(i)] = VBA_kstest(nanzscore(e)); + R2_a(i) = VBA_FtoR2(stat(i).^2,1,df); + [tmp,ks(i)] = VBA_kstest(zscore(e)); if verbose && p>1 fprintf(1,repmat('\b',1,8)) fprintf(1,'%6.2f %%',100*i/p) @@ -185,11 +185,11 @@ SS_tot = sum((y(:,i)-mean(y(:,i))).^2); SS_err = sum(e(:,i).^2); R2(i) = 1-(SS_err/SS_tot); - R2_a(i) = FtoR2(stat(i),df(1),df(2)); + R2_a(i) = VBA_FtoR2(stat(i),df(1),df(2)); % SS_tot_a = sum((y_a(:,i)-mean(y_a(:,i))).^2); % SS_err_a = sum((y_a(:,i)-yhat_a(:,i)).^2); % R2_a(i) = 1-(SS_err_a/SS_tot_a); - [tmp,ks(i)] = VBA_kstest(nanzscore(e)); + [tmp,ks(i)] = VBA_kstest(zscore(e)); if verbose && p>1 fprintf(1,repmat('\b',1,8)) fprintf(1,'%6.2f %%',100*i/p) @@ -303,7 +303,7 @@ % parameters' correlation matrix handles.ha(3) = subplot(3,2,5,'parent',handles.hf,'nextplot','add'); -imagesc(cov2corr(iC),'parent',handles.ha(3)) +imagesc(VBA_cov2corr(iC),'parent',handles.ha(3)) axis(handles.ha(3),'square') axis(handles.ha(3),'equal') axis(handles.ha(3),'tight') @@ -361,7 +361,7 @@ all.handles = handles; -try,getSubplots;end +try,VBA_getSubplots ();end function pBP = myBreuschPaganTest(e,X) X = [X,ones(size(X,1),1)]; @@ -384,7 +384,7 @@ function myResiduals(e1,e2) pe = exp(-0.5*(ge-me).^2./ve)./(sqrt(ve*2*pi)); dx = diff(x); dx = dx(1); -[epe,ege] = empiricalHist(e,1); +[epe,ege] = VBA_empiricalDensity(e,1); for i=1:length(x) in = find(ge>x(i)-dx/2&ge=0.95); % posterior estimates -p = VBA_BMA({p0;p1},[F0;F1]); +p = VBA_BMA([p0;p1],[F0;F1]); % posterior.mu = p.muPhi ; -posterior.mu = [ p.muPhi(1) , sparsify(p.muPhi(2),log(2)) ]; +posterior.mu = [ p.muPhi(1) , VBA_sparsifyPrior(p.muPhi(2)) ]; % Formating stat=struct; diff --git a/stats&plots/doROC.m b/modules/classical_statistics/doROC.m similarity index 96% rename from stats&plots/doROC.m rename to modules/classical_statistics/doROC.m index 3671acec..ba0cf719 100644 --- a/stats&plots/doROC.m +++ b/modules/classical_statistics/doROC.m @@ -40,8 +40,8 @@ % area under the ROC curve p = -trapz(1-out.TN,out.TP); -[hp,gp] = empiricalHist(xp(:),1); -[hn,gn] = empiricalHist(xn(:),1); +[hp,gp] = VBA_empiricalDensity(xp(:),1); +[hn,gn] = VBA_empiricalDensity(xn(:),1); % find FPR=0.05 threshold and power d = (0.05-out.FP).^2; diff --git a/classification/BMM/MixtureOfBinomials.m b/modules/classification/BMM/MixtureOfBinomials.m similarity index 100% rename from 
diff --git a/classification/BMM/MixtureOfBinomials.m b/modules/classification/BMM/MixtureOfBinomials.m
similarity index 100%
rename from classification/BMM/MixtureOfBinomials.m
rename to modules/classification/BMM/MixtureOfBinomials.m
diff --git a/classification/BMM/generateBMM.m b/modules/classification/BMM/generateBMM.m
similarity index 86%
rename from classification/BMM/generateBMM.m
rename to modules/classification/BMM/generateBMM.m
index 1b1a642d..210293c6 100644
--- a/classification/BMM/generateBMM.m
+++ b/modules/classification/BMM/generateBMM.m
@@ -18,7 +18,7 @@
         sumAlphak = sumAlphak+alpha(k);
     end
     for d=1:D
-        [y(d,i)] = sampleFromArbitraryP([lambda(d,k),1-lambda(d,k)]',[1,0]',1);
+        y(d, i) = VBA_random ('Bernoulli', lambda(d, k));
     end
     labels(i,k) = 1;
 end
diff --git a/classification/CRP/VB_CRP.m b/modules/classification/CRP/VB_CRP.m
similarity index 100%
rename from classification/CRP/VB_CRP.m
rename to modules/classification/CRP/VB_CRP.m
diff --git a/classification/CRP/simulate_CRP.m b/modules/classification/CRP/simulate_CRP.m
similarity index 87%
rename from classification/CRP/simulate_CRP.m
rename to modules/classification/CRP/simulate_CRP.m
index 1aa3e86f..a7860464 100644
--- a/classification/CRP/simulate_CRP.m
+++ b/modules/classification/CRP/simulate_CRP.m
@@ -18,6 +18,6 @@
         nk(k) = sum(x1(1:i-1)==k);
     end
     p = [nk;alpha]./(alpha+i-1);
-    gridX = [1:Ki+1]';
-    [x1(i)] = sampleFromArbitraryP(p,gridX,1);
+    x1(i) = VBA_random ('Categorical', p);
+
 end
\ No newline at end of file
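
Both hunks above route sampling through the unified VBA_random generator mentioned in the change log. Based on these call sites only (any additional size arguments would be assumptions), the pattern is:

    y = VBA_random ('Bernoulli', 0.7);   % one draw from {0, 1} with P(y = 1) = 0.7
    p = [0.2; 0.3; 0.5];                 % category probabilities (must sum to 1)
    k = VBA_random ('Categorical', p);   % one draw from {1, 2, 3}
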
diff --git a/classification/GMM/PCA_MoG.m b/modules/classification/GMM/PCA_MoG.m
similarity index 100%
rename from classification/GMM/PCA_MoG.m
rename to modules/classification/GMM/PCA_MoG.m
diff --git a/classification/GMM/VBA_MoG.m b/modules/classification/GMM/VBA_MoG.m
similarity index 95%
rename from classification/GMM/VBA_MoG.m
rename to modules/classification/GMM/VBA_MoG.m
index e077170e..e9fbe2c0 100644
--- a/classification/GMM/VBA_MoG.m
+++ b/modules/classification/GMM/VBA_MoG.m
@@ -157,7 +157,7 @@
         posterior.muEta = priors.muEta + eps*randn(dim.p,dim.K);
     case 'rand'
         for k=1:K
-            S = VBA_getISqrtMat(priors.SigmaEta{k},0);
+            S = VBA_sqrtm (priors.SigmaEta{k});
             posterior.muEta(:,k) = priors.muEta(:,k) + S*randn(dim.p,1);
         end
 end
@@ -181,7 +181,7 @@
     for i=1:8
         handles.ha(i) = subplot(3,3,i,'parent',handles.hf,'box','off');
     end
-    try;getSubplots;end
+    try; VBA_getSubplots ();end
     plot(handles.ha(1),F,'ro')
     set(handles.ha(1),'nextplot','add','ygrid','on')
     imagesc(sqrt(dist(y)),'parent',handles.ha(6))
@@ -211,7 +211,7 @@
         end
         set(handles.ha(7),'xlim',bounds)
     end
-    getSubplots
+    VBA_getSubplots ();
 end
 
 % Main VB scheme
@@ -254,10 +254,10 @@
             set(handles.hp(k),'ydata',pk(k,:),'color',col(k,:));
         end
     end
-    [Ef,Vf] = Dirichlet_moments(posterior.d);
+    [Ef,Vf] = VBA_dirichlet_moments(posterior.d);
     cla(handles.ha(8))
     try
-        plotUncertainTimeSeries(Ef,diag(Vf),[],handles.ha(8));
+        plotUncertainTimeSeries(Ef,Vf,[],handles.ha(8));
     catch
         bar(Ef,'parent',handles.ha(8),'facecolor',0.8*[1 1 1])
     end
@@ -391,7 +391,7 @@
     return
 end
 dF = F(it) - F(it-1);
-if isweird(F) || abs(dF)<=options.TolFun || it>=options.MaxIter
+if VBA_isWeird (F) || abs(dF)<=options.TolFun || it>=options.MaxIter
     stop = 1;
 end
@@ -444,12 +444,6 @@
     end
 end
 
-function [iy,ix,Max] = maxMat(A)
-% finds the x/y indices of the max value of a matrix
-[my,iyx] = max(A,[],1);
-[Max,ix] = max(my);
-iy = iyx(ix);
-
 function [eta,Z] = dummyHierarchical(y,K,options)
 % This function operates a (dummy) hierarchical clustering algorthm, which
 % stops whenever the desired number of clusters is attained. This is used
@@ -463,7 +457,7 @@
 end
 d = -dist(eta);
 for i = 1:n-K
-    [i1,i2] = maxMat(d-diag(Inf*ones(size(eta,2),1)));
+    [~,i1,i2] = VBA_maxMat(d-diag(Inf*ones(size(eta,2),1)));
     newZ = eye(size(eta,2));
     newZ(min([i1,i2]),max([i1,i2])) = 1;
     newZ(max([i1,i2]),:) = [];
@@ -495,14 +489,6 @@
     d(i,:) = sum(tmp,1);
 end
 
-function [E,V] = Dirichlet_moments(a)
-% derives the firs- and second-order moments of a Dirichlet density
-a0 = sum(a);
-E = a./a0;
-V = -a*a';
-V = V - diag(diag(V)) + diag(a.*(a0-a));
-V = V./((a0+1)*a0^2);
-
 function colors = getColors(n)
 hf = figure('visible','off');
 ha = axes('parent',hf);
diff --git a/classification/GMM/VBA_projectMoG.m b/modules/classification/GMM/VBA_projectMoG.m
similarity index 99%
rename from classification/GMM/VBA_projectMoG.m
rename to modules/classification/GMM/VBA_projectMoG.m
index 1b7fabd3..bd316901 100644
--- a/classification/GMM/VBA_projectMoG.m
+++ b/modules/classification/GMM/VBA_projectMoG.m
@@ -98,7 +98,7 @@
     text(muk(1,k),muk(2,k),num2str(k),'color',col(k,:),'parent',handles.ha,'HorizontalAlignment','center','VerticalAlignment','middle','FontSize',14);
 end
 grid(handles.ha,'on')
-try;getSubplots;end
+try;VBA_getSubplots ();end
 
 function lp = getLogNormpdf(muk,vark,X,Y)
 ng = length(X);
diff --git a/classification/GMM/VBEM_GM.m b/modules/classification/GMM/VBEM_GM.m
similarity index 99%
rename from classification/GMM/VBEM_GM.m
rename to modules/classification/GMM/VBEM_GM.m
index 4f888598..5ad7b32a 100644
--- a/classification/GMM/VBEM_GM.m
+++ b/modules/classification/GMM/VBEM_GM.m
@@ -332,7 +332,7 @@
 eta = y;
 while size(Z,1) > K
     C = corrcoef(eta);
-    [i,j] = maxMat(C-diag(Inf*ones(n,1)));
+    [~,i,j] = VBA_maxMat(C-diag(Inf*ones(n,1)));
     newZ = eye(n);
     newZ(i,j) = 1;
     newZ(j,:) = [];
diff --git a/classification/GMM/dist.m b/modules/classification/GMM/dist.m
similarity index 100%
rename from classification/GMM/dist.m
rename to modules/classification/GMM/dist.m
diff --git a/classification/GMM/generateGMM.m b/modules/classification/GMM/generateGMM.m
similarity index 100%
rename from classification/GMM/generateGMM.m
rename to modules/classification/GMM/generateGMM.m
diff --git a/classification/GMM/plotResults.m b/modules/classification/GMM/plotResults.m
similarity index 100%
rename from classification/GMM/plotResults.m
rename to modules/classification/GMM/plotResults.m
diff --git a/VBA_classification.m b/modules/classification/VBA_classification.m
similarity index 97%
rename from VBA_classification.m
rename to modules/classification/VBA_classification.m
index 60774163..ba926da5 100644
--- a/VBA_classification.m
+++ b/modules/classification/VBA_classification.m
@@ -67,12 +67,12 @@
 
 % check basic numerical requirements
 try
-    if isweird(y)
+    if VBA_isWeird (y)
         disp('Error: data contains weird values!')
         return
     end
 end
-if ~isbinary(y)
+if ~ VBA_isBinary (y)
     disp('Error: data should be binary!')
     return
 end
@@ -104,12 +104,11 @@
 dim.n_theta = 0;
 dim.n = 0;
 g_fname = @g_classif0;
-options.binomial = 1;
+options.sources = struct('type',1,'out',1);
 options.DisplayWin = 0;
 options.verbose = 0;
 options.inG.X = X';
 options.inG.sparse = sparse;
-options.inG.sparseP = 1;
 options.n0 = 0; % number of dummy counts
 
 if ~isequal(k,0) % performing cross-validation scheme
@@ -140,7 +139,7 @@
     acc(itest) = [y(itest)==ytest];
     acc0(itest) = out.fit.acc;
     if sparse
-        P(:,i) = sparseTransform(posterior.muPhi,options.inG.sparseP);
+        P(:,i) = VBA_sparsifyPrior (posterior.muPhi);
     else
         P(:,i) = posterior.muPhi;
     end
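
The local Dirichlet_moments helper removed from VBA_MoG.m above spells out the moments that VBA_dirichlet_moments is presumably expected to return: for concentrations a with a0 = sum(a), the mean is E = a./a0 and the variances are a.*(a0-a)./((a0+1)*a0^2). A small worked check, assuming the refactored function keeps these semantics (whether the second output is a variance vector or a full covariance matrix is not settled by these hunks):

    a = [2; 3; 5];                        % Dirichlet concentration parameters, a0 = 10
    [Ef, Vf] = VBA_dirichlet_moments (a); % Ef ~ [0.2; 0.3; 0.5]; first variance ~ 2*8/1100 ~ 0.0145
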
diff --git a/stats&plots/RFT_Euler.m b/modules/random_field_theory/RFT_Euler.m
similarity index 100%
rename from stats&plots/RFT_Euler.m
rename to modules/random_field_theory/RFT_Euler.m
diff --git a/stats&plots/RFT_GLM_contrast.m b/modules/random_field_theory/RFT_GLM_contrast.m
similarity index 99%
rename from stats&plots/RFT_GLM_contrast.m
rename to modules/random_field_theory/RFT_GLM_contrast.m
index 28dadeaa..7ff3c166 100644
--- a/stats&plots/RFT_GLM_contrast.m
+++ b/modules/random_field_theory/RFT_GLM_contrast.m
@@ -45,7 +45,7 @@
 
 % check basic numerical requirements
 try
-    if isweird(y)
+    if VBA_isWeird (y)
         disp('Error: data contains weird values!')
         return
     end
diff --git a/stats&plots/RFT_Gtf.m b/modules/random_field_theory/RFT_Gtf.m
similarity index 100%
rename from stats&plots/RFT_Gtf.m
rename to modules/random_field_theory/RFT_Gtf.m
diff --git a/stats&plots/RFT_Pval.m b/modules/random_field_theory/RFT_Pval.m
similarity index 100%
rename from stats&plots/RFT_Pval.m
rename to modules/random_field_theory/RFT_Pval.m
diff --git a/stats&plots/RFT_ReDisplay.m b/modules/random_field_theory/RFT_ReDisplay.m
similarity index 99%
rename from stats&plots/RFT_ReDisplay.m
rename to modules/random_field_theory/RFT_ReDisplay.m
index 05d4dd83..cfcc1d22 100644
--- a/stats&plots/RFT_ReDisplay.m
+++ b/modules/random_field_theory/RFT_ReDisplay.m
@@ -102,7 +102,7 @@
     'string',str,'horizontalAlignment','left','FontSize',11,'parent',out.hf);
 
-try,getSubplots,end
+try, VBA_getSubplots (); end
 
diff --git a/stats&plots/RFT_clusters.m b/modules/random_field_theory/RFT_clusters.m
similarity index 95%
rename from stats&plots/RFT_clusters.m
rename to modules/random_field_theory/RFT_clusters.m
index c6ec87e3..6b1dc9ca 100644
--- a/stats&plots/RFT_clusters.m
+++ b/modules/random_field_theory/RFT_clusters.m
@@ -15,11 +15,11 @@
 try,verbose;catch,verbose=0;end
 
-X = vec(X);
+X = VBA_vec(X);
 
 % 1- find upcrossing clusters induced by xc
-B = [0;vec(X>xc);0];
+B = [0;VBA_vec(X>xc);0];
 n = length(X);
-in = vec(find(B==1));
+in = VBA_vec(find(B==1));
 if isempty(in)
     clusters = [];
     nc = 0;
diff --git a/stats&plots/RFT_expectedTopo.m b/modules/random_field_theory/RFT_expectedTopo.m
similarity index 98%
rename from stats&plots/RFT_expectedTopo.m
rename to modules/random_field_theory/RFT_expectedTopo.m
index 04bf4bc0..2bce0f0f 100644
--- a/stats&plots/RFT_expectedTopo.m
+++ b/modules/random_field_theory/RFT_expectedTopo.m
@@ -15,7 +15,7 @@
 try;c;catch;c=1;end
 try,type;catch;type='norm';end
 try,dof;catch;dof=NaN;end
-u = vec(u);
+u = VBA_vec(u);
 switch type
     case 'norm'
         P = 1-VBA_spm_Ncdf(u,0,1);
diff --git a/stats&plots/RFT_localmax.m b/modules/random_field_theory/RFT_localmax.m
similarity index 97%
rename from stats&plots/RFT_localmax.m
rename to modules/random_field_theory/RFT_localmax.m
index 863f787b..c4080606 100644
--- a/stats&plots/RFT_localmax.m
+++ b/modules/random_field_theory/RFT_localmax.m
@@ -17,7 +17,7 @@
 X1 = [-Inf;X];
 X2 = [X;-Inf];
 imax = intersect(find(diff(X1)>0),find(diff(X2)<0));
-imax = vec(imax);
+imax = VBA_vec(imax);
 
 if ~isequal(imax(1),1) && X(1)>X(2)
     imax = [1;imax];
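
The vec calls replaced throughout the RFT files above are plain column-vectorisation helpers; VBA_vec(X) is presumably equivalent to X(:). For instance:

    A = magic (3);
    v = VBA_vec (A);   % presumably the same as A(:), i.e. a 9-by-1 column vector
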
diff --git a/stats&plots/RFT_main.m b/modules/random_field_theory/RFT_main.m
similarity index 98%
rename from stats&plots/RFT_main.m
rename to modules/random_field_theory/RFT_main.m
index a590c08e..ed75bf12 100644
--- a/stats&plots/RFT_main.m
+++ b/modules/random_field_theory/RFT_main.m
@@ -105,7 +105,7 @@
 end
 if ~isempty(options.mask)
-    if length(options.mask)~=length(X) || ~isequal(vec(unique(options.mask)),[0;1])
+    if length(options.mask)~=length(X) || ~isequal(VBA_vec(unique(options.mask)),[0;1])
         disp(['RFT-1D: error: invalid mask provided!'])
         out = [];
         return
@@ -115,7 +115,7 @@
 out.options = options;
 out.verbose = verbose;
-X = vec(X);
+X = VBA_vec(X);
 if isempty(options.mask)
     L = length(X);
 else
diff --git a/stats&plots/RFT_rescaling.m b/modules/random_field_theory/RFT_rescaling.m
similarity index 100%
rename from stats&plots/RFT_rescaling.m
rename to modules/random_field_theory/RFT_rescaling.m
diff --git a/stats&plots/RFT_smoothness.m b/modules/random_field_theory/RFT_smoothness.m
similarity index 100%
rename from stats&plots/RFT_smoothness.m
rename to modules/random_field_theory/RFT_smoothness.m
diff --git a/subfunctions/RecToMfunction.m b/modules/theory_of_mind/RecToMfunction.m
similarity index 93%
rename from subfunctions/RecToMfunction.m
rename to modules/theory_of_mind/RecToMfunction.m
index 222d0b55..f5c933ef 100644
--- a/subfunctions/RecToMfunction.m
+++ b/modules/theory_of_mind/RecToMfunction.m
@@ -72,7 +72,7 @@
     % max-entropic belief about opponent's level
     Pk = 1./level.*ones(1,level);
     if level>1
-        fx(1:(level-1)) = invsigmoid(Pk(1:(end-1)));
+        fx(1:(level-1)) = VBA_sigmoid(Pk(1:(end-1)), 'inverse', true);
     end
 else % trial OK
     ot = u(1); % opponent's last move
@@ -89,14 +89,14 @@
         Vx(j) = Sig'*df.^2; % V[x(theta)|k'=j-1]
     end
     % derive E[log sigm(x(theta))] -> correction of Devaine et al. (20014)!
-    Els1 = Elogsig(f,Vx);
-    Els0 = Elogsig(-f,Vx);
+    Els1 = VBA_Elogsig(f,Vx);
+    Els0 = VBA_Elogsig(-f,Vx);
     % get prior P(k')
-    P0 = sigmoid(x(1:(level-1))); % P(k), with k=0,...,k'-2
+    P0 = VBA_sigmoid(x(1:(level-1))); % P(k), with k=0,...,k'-2
     P0 = [P0;max(0,1-sum(P0))]; % insert last P(k=k'-1)
     % partial forgetting of prior belief on opponent's level?
     if inF.diluteP % [not in Devaine et al. (20014)!]
-        dc = sigmoid(P(2)); % dilution coefficient
+        dc = VBA_sigmoid(P(2)); % dilution coefficient
         P0 = (1-dc).*P0 + dc./(level.*ones(level,1));
     end
     % update P(k')
@@ -104,7 +104,7 @@
     Pk = P0.*exp(w.*LL);
     Pk = Pk./sum(Pk); % posterior P(k')
     % store posterior P(k') in hidden-states vector
-    fx(1:(level-1)) = invsigmoid(Pk(1:(end-1))); % only k'